Nov 25 09:36:33 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 09:36:33 crc restorecon[4698]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:36:33 crc restorecon[4698]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc 
restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:36:33 crc 
restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc 
restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc 
restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 
crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 
09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc 
restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:36:33 crc restorecon[4698]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 25 09:36:34 crc kubenswrapper[4854]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:36:34 crc kubenswrapper[4854]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 25 09:36:34 crc kubenswrapper[4854]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:36:34 crc kubenswrapper[4854]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 25 09:36:34 crc kubenswrapper[4854]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 25 09:36:34 crc kubenswrapper[4854]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.757407 4854 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765217 4854 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765240 4854 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765245 4854 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765249 4854 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765254 4854 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765259 4854 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765264 4854 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765269 4854 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765274 4854 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765278 4854 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765282 4854 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765287 4854 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765291 4854 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765296 4854 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765300 4854 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765304 4854 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765308 4854 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765313 4854 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765318    4854 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765323    4854 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765326    4854 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765330    4854 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765334    4854 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765338    4854 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765342    4854 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765347    4854 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765359    4854 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765363    4854 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765367    4854 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765372    4854 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765376    4854 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765380    4854 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765385    4854 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765390    4854 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765394    4854 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765400    4854 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765404    4854 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765408    4854 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765413    4854 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765417    4854 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765421    4854 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765426    4854 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765429    4854 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765434    4854 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765438    4854 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765442    4854 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765446    4854 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765450    4854 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765454    4854 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765458    4854 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765462    4854 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765466    4854 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765471    4854 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765474    4854 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765478    4854 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765483    4854 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765487    4854 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765492    4854 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765496    4854 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765501    4854 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765507    4854 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765512    4854 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765516    4854 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765522    4854 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765527    4854 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765532    4854 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765537    4854 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765542    4854 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765548    4854 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765552    4854 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.765556    4854 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
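The feature_gate.go:330 warnings above are the upstream kubelet rejecting gate names it does not know; on this OpenShift node they appear to be OpenShift-carried gates passed through to the kubelet, and the same run is re-logged each time the gate set is applied. A minimal, illustrative Python sketch for tallying the distinct unrecognized gates (the kubelet.log filename and the script itself are assumptions, not part of this log):

    import re
    from collections import Counter

    # Matches kubelet lines such as:
    #   feature_gate.go:330] unrecognized feature gate: GatewayAPI
    PATTERN = re.compile(r"unrecognized feature gate: (\S+)")

    def unrecognized_gates(log_path):
        counts = Counter()
        with open(log_path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                match = PATTERN.search(line)
                if match:
                    counts[match.group(1)] += 1
        return counts

    # Example: list each gate and how many times the kubelet re-logged it.
    for gate, seen in sorted(unrecognized_gates("kubelet.log").items()):
        print(f"{gate}: {seen}")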
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767873    4854 flags.go:64] FLAG: --address="0.0.0.0"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767895    4854 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767906    4854 flags.go:64] FLAG: --anonymous-auth="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767913    4854 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767920    4854 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767925    4854 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767932    4854 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767939    4854 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767945    4854 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767950    4854 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767955    4854 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767961    4854 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767968    4854 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767974    4854 flags.go:64] FLAG: --cgroup-root=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767979    4854 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767984    4854 flags.go:64] FLAG: --client-ca-file=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.767988    4854 flags.go:64] FLAG: --cloud-config=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768000    4854 flags.go:64] FLAG: --cloud-provider=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768005    4854 flags.go:64] FLAG: --cluster-dns="[]"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768010    4854 flags.go:64] FLAG: --cluster-domain=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768015    4854 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768020    4854 flags.go:64] FLAG: --config-dir=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768025    4854 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768030    4854 flags.go:64] FLAG: --container-log-max-files="5"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768036    4854 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768040    4854 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768045    4854 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768050    4854 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768055    4854 flags.go:64] FLAG: --contention-profiling="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768059    4854 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768064    4854 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768069    4854 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768075    4854 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768081    4854 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768087    4854 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768092    4854 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768097    4854 flags.go:64] FLAG: --enable-load-reader="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768101    4854 flags.go:64] FLAG: --enable-server="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768106    4854 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768112    4854 flags.go:64] FLAG: --event-burst="100"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768117    4854 flags.go:64] FLAG: --event-qps="50"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768122    4854 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768127    4854 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768131    4854 flags.go:64] FLAG: --eviction-hard=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768137    4854 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768142    4854 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768147    4854 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768152    4854 flags.go:64] FLAG: --eviction-soft=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768157    4854 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768161    4854 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768166    4854 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768170    4854 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768174    4854 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768179    4854 flags.go:64] FLAG: --fail-swap-on="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768183    4854 flags.go:64] FLAG: --feature-gates=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768192    4854 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768197    4854 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768202    4854 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768209    4854 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768214    4854 flags.go:64] FLAG: --healthz-port="10248"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768219    4854 flags.go:64] FLAG: --help="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768224    4854 flags.go:64] FLAG: --hostname-override=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768229    4854 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768234    4854 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768239    4854 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768243    4854 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768247    4854 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768251    4854 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768256    4854 flags.go:64] FLAG: --image-service-endpoint=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768260    4854 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768264    4854 flags.go:64] FLAG: --kube-api-burst="100"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768269    4854 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768274    4854 flags.go:64] FLAG: --kube-api-qps="50"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768278    4854 flags.go:64] FLAG: --kube-reserved=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768283    4854 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768287    4854 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768292    4854 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768297    4854 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768303    4854 flags.go:64] FLAG: --lock-file=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768308    4854 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768312    4854 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768317    4854 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768324    4854 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768328    4854 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768332    4854 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768337    4854 flags.go:64] FLAG: --logging-format="text"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768343    4854 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768349    4854 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768354    4854 flags.go:64] FLAG: --manifest-url=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768360    4854 flags.go:64] FLAG: --manifest-url-header=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768366    4854 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768371    4854 flags.go:64] FLAG: --max-open-files="1000000"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768377    4854 flags.go:64] FLAG: --max-pods="110"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768382    4854 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768386    4854 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768390    4854 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768395    4854 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768399    4854 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768404    4854 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768408    4854 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768419    4854 flags.go:64] FLAG: --node-status-max-images="50"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768423    4854 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768428    4854 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768432    4854 flags.go:64] FLAG: --pod-cidr=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768437    4854 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768444    4854 flags.go:64] FLAG: --pod-manifest-path=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768448    4854 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768453    4854 flags.go:64] FLAG: --pods-per-core="0"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768457    4854 flags.go:64] FLAG: --port="10250"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768462    4854 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768466    4854 flags.go:64] FLAG: --provider-id=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768471    4854 flags.go:64] FLAG: --qos-reserved=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768476    4854 flags.go:64] FLAG: --read-only-port="10255"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768481    4854 flags.go:64] FLAG: --register-node="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768486    4854 flags.go:64] FLAG: --register-schedulable="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768491    4854 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768500    4854 flags.go:64] FLAG: --registry-burst="10"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768506    4854 flags.go:64] FLAG: --registry-qps="5"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768512    4854 flags.go:64] FLAG: --reserved-cpus=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768516    4854 flags.go:64] FLAG: --reserved-memory=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768523    4854 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768528    4854 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768534    4854 flags.go:64] FLAG: --rotate-certificates="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768539    4854 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768544    4854 flags.go:64] FLAG: --runonce="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768550    4854 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768555    4854 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768560    4854 flags.go:64] FLAG: --seccomp-default="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768565    4854 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768570    4854 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768576    4854 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768581    4854 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768587    4854 flags.go:64] FLAG: --storage-driver-password="root"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768592    4854 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768597    4854 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768602    4854 flags.go:64] FLAG: --storage-driver-user="root"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768607    4854 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768613    4854 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768618    4854 flags.go:64] FLAG: --system-cgroups=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768623    4854 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768631    4854 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768635    4854 flags.go:64] FLAG: --tls-cert-file=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768640    4854 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768652    4854 flags.go:64] FLAG: --tls-min-version=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768657    4854 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768662    4854 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768667    4854 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768688    4854 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768693    4854 flags.go:64] FLAG: --v="2"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768699    4854 flags.go:64] FLAG: --version="false"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768706    4854 flags.go:64] FLAG: --vmodule=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768711    4854 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.768716    4854 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
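Each flags.go:64] line above records one effective command-line flag in the fixed form FLAG: --name="value", so the kubelet's full startup flag set can be recovered mechanically from the log. A minimal, illustrative Python sketch in the same spirit as the one above (again assuming the log is saved locally as kubelet.log; the script is not part of the log) that collects the flags into a dictionary:

    import re

    # Matches kubelet startup lines such as:
    #   flags.go:64] FLAG: --max-pods="110"
    FLAG_RE = re.compile(r'FLAG: --([\w.-]+)="(.*)"')

    def kubelet_flags(log_path):
        flags = {}
        with open(log_path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                match = FLAG_RE.search(line)
                if match:
                    flags[match.group(1)] = match.group(2)
        return flags

    # Example: look up one flag captured in this log.
    flags = kubelet_flags("kubelet.log")
    print(flags.get("container-runtime-endpoint"))  # expected: /var/run/crio/crio.sock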
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.769151    4854 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.779335    4854 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.779366    4854 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.780273    4854 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.790171    4854 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.790348    4854 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.792862 4854 server.go:997] "Starting client certificate rotation"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.792910 4854 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.794569 4854 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-14 18:27:36.336184513 +0000 UTC
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.794632 4854 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.822688 4854 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.824455 4854 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:36:34 crc kubenswrapper[4854]: E1125 09:36:34.826751 4854 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.842835 4854 log.go:25] "Validated CRI v1 runtime API"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.879358 4854 log.go:25] "Validated CRI v1 image API"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.881489 4854 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.887826 4854 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-09-31-16-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.887892 4854 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}]
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.914276 4854 manager.go:217] Machine: {Timestamp:2025-11-25 09:36:34.908202552 +0000 UTC m=+0.761196008 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:9ec5d79d-dba9-49c0-8c51-26f030e53128 BootID:a363dd8e-616a-41fb-b3a6-8f9b7ff40e37 Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:4e:56:07 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:4e:56:07 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:f1:ff:90 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:78:ab:e8 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:54:fa:65 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:16:3b:14 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:96:fa:17:ce:01:cb Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:7e:1e:37:0c:5b:e1 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.914798 4854 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.915035 4854 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.917711 4854 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.918034 4854 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.918099 4854 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.918442 4854 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.918459 4854 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.918991 4854 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.919037 4854 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.919405 4854 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.919556 4854 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.927180 4854 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.927218 4854 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.927308 4854 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.927334 4854 kubelet.go:324] "Adding apiserver pod source"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.927361 4854 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.934205 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused
Nov 25 09:36:34 crc kubenswrapper[4854]: E1125 09:36:34.934338 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.934202 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused
Nov 25 09:36:34 crc kubenswrapper[4854]: E1125 09:36:34.934387 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.937307 4854 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.939017 4854 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.941050 4854 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942721 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942762 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942777 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942791 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942814 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942828 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942842 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942864 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942881 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942896 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942916 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.942931 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.943836 4854 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.944534 4854 server.go:1280] "Started kubelet"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.945605 4854 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.945834 4854 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.945920 4854 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused
Nov 25 09:36:34 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.946560 4854 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.948557 4854 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.948598 4854 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.948726 4854 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 03:04:54.240884311 +0000 UTC
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.948819 4854 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1169h28m19.292070643s for next certificate rotation
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.948845 4854 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.948858 4854 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.948991 4854 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 09:36:34 crc kubenswrapper[4854]: E1125 09:36:34.949157 4854 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.949220 4854 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 09:36:34 crc kubenswrapper[4854]: W1125 09:36:34.949938 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused
Nov 25 09:36:34 crc kubenswrapper[4854]: E1125 09:36:34.950043 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.950440 4854 factory.go:55] Registering systemd factory
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.950474 4854 factory.go:221] Registration of the systemd container factory successfully
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.950869 4854 factory.go:153] Registering CRI-O factory
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.950909 4854 factory.go:221] Registration of the crio container factory successfully
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.951004 4854 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.951047 4854 factory.go:103] Registering Raw factory
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.951070 4854 manager.go:1196] Started watching for new ooms in manager
Nov 25 09:36:34 crc kubenswrapper[4854]: E1125 09:36:34.953227 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="200ms"
Nov 25 09:36:34 crc kubenswrapper[4854]: E1125 09:36:34.953330 4854 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.184:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b364fb0ef9b50 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:36:34.94449032 +0000 UTC m=+0.797483726,LastTimestamp:2025-11-25 09:36:34.94449032 +0000 UTC m=+0.797483726,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.958422 4854 manager.go:319] Starting recovery of all containers
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964478 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964525 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964535 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964566 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964577 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964587 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964596 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964605 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964615 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964624 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964634 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964644 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964653 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964664 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964686 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.964699 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966620 4854 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966655 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966686 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966701 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966714 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966727 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966740 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966754 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966769 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966784 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966800 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966817 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966833 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966872 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966885 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966897 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966909 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966922 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966954 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966966 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966978 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.966990 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967003 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967017 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967029 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967041 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967055 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967067 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967080 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967093 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967105 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967119 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967133 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967147 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967160 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967172 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967184 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967203 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967217 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967233 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967246 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967261 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967274 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967285 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967300 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967313 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967328 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967341 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967360 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967373 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967386 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967399 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967413 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967426 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967439 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967454 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967466 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967480 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967493 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967506 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967518 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967532 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967545 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967557 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967570 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967582 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967595 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967608 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967621 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967634 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967646 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967660 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967693 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967707 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967733 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967750 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967763 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967778 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967792 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967805 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967819 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967831 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967843 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967854 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967865 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967875 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967884 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967900 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967909 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967950 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967961 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967972 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967982 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.967994 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968005 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968015 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968026 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968037 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968048 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968058 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968069 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968080 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968090 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968100 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968111 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968121 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968132 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968144 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968157 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968169 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968180 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968190 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968203 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968216 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968228 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968238 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968248 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968257 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968271 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968285 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968294 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968305 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968315 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968325 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c"
volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968334 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968345 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968355 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968364 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968374 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968385 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968395 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968406 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968416 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968426 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968438 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968450 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968462 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968472 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968483 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968493 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968502 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968512 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968524 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968534 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968543 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968553 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968563 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968572 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968583 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968594 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968605 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968618 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968629 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968639 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968649 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968659 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968688 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968700 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968710 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968723 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968733 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968744 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968754 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968763 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968774 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968784 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968793 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968804 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" 
volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968814 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968824 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968834 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968844 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968854 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968863 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968872 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968882 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968891 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968900 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968909 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968919 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968929 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968938 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968948 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968961 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968971 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968981 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.968991 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.969001 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.969011 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.969021 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" 
volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.969031 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.969040 4854 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.969049 4854 reconstruct.go:97] "Volume reconstruction finished" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.969059 4854 reconciler.go:26] "Reconciler: start to sync state" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.979781 4854 manager.go:324] Recovery completed Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.990158 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.992310 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.992369 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.992389 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.994801 4854 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.994839 4854 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 09:36:34 crc kubenswrapper[4854]: I1125 09:36:34.994867 4854 state_mem.go:36] "Initialized new in-memory state store" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.009416 4854 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.012054 4854 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.012115 4854 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.012147 4854 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.012196 4854 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 09:36:35 crc kubenswrapper[4854]: W1125 09:36:35.012825 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.012874 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.013181 4854 policy_none.go:49] "None policy: Start" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.014778 4854 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.014810 4854 state_mem.go:35] "Initializing new in-memory state store" Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.049328 4854 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.064994 4854 manager.go:334] "Starting Device Plugin manager" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.065080 4854 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.065096 4854 server.go:79] "Starting device plugin registration server" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.065574 4854 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.065595 4854 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.065799 4854 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.065950 4854 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.066017 4854 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.075771 4854 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.112739 4854 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 09:36:35 crc kubenswrapper[4854]: 
I1125 09:36:35.112890 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.114275 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.114368 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.114457 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.114616 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.114916 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.114971 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117523 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117545 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117573 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117563 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117700 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117739 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117930 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.117988 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.118584 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.118631 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.118643 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.118874 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.118994 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.119033 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.119721 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.119745 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.119755 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.120980 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121009 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121022 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121058 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121086 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121097 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121204 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121366 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.121427 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122174 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122197 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122207 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122176 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122281 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122338 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.122361 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.123090 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.123120 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.123128 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.156211 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="400ms" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.166171 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.167640 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.167714 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.167731 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.167764 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.168440 4854 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.184:6443: connect: connection refused" node="crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171170 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171219 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171252 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171284 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171321 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171352 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171414 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171468 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171497 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171513 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171529 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171546 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171564 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171581 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.171595 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.252479 4854 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.184:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b364fb0ef9b50 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:36:34.94449032 +0000 UTC m=+0.797483726,LastTimestamp:2025-11-25 09:36:34.94449032 +0000 UTC m=+0.797483726,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.272810 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.272894 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.272921 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.272940 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.272963 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: 
\"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.272984 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273007 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273030 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273050 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273072 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273095 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273115 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273115 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273137 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273167 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273192 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273115 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273352 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273390 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273414 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273458 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273492 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273540 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273506 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273515 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273582 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273566 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273567 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273571 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.273635 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.368761 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370356 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370436 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370455 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370497 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.371146 4854 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.184:6443: connect: connection refused" node="crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.468558 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.486951 4854 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.368761 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370356 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370436 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370455 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.370497 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.371146 4854 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.184:6443: connect: connection refused" node="crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.468558 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.486951 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: W1125 09:36:35.504274 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-fca5c0ca5199c312e62f6867820aae60d52d45ab055636ec60f21b0f483f4866 WatchSource:0}: Error finding container fca5c0ca5199c312e62f6867820aae60d52d45ab055636ec60f21b0f483f4866: Status 404 returned error can't find the container with id fca5c0ca5199c312e62f6867820aae60d52d45ab055636ec60f21b0f483f4866
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.509842 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.516532 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: W1125 09:36:35.531338 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-42ee95833752973322b26ecec2e3c5ae090eb1437d38c490820c71b8d43d0034 WatchSource:0}: Error finding container 42ee95833752973322b26ecec2e3c5ae090eb1437d38c490820c71b8d43d0034: Status 404 returned error can't find the container with id 42ee95833752973322b26ecec2e3c5ae090eb1437d38c490820c71b8d43d0034
Nov 25 09:36:35 crc kubenswrapper[4854]: W1125 09:36:35.533105 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-f7187339fff2a3485de60c47b5797619d2a3aeff97db78e0ee6e38dcfa2129ec WatchSource:0}: Error finding container f7187339fff2a3485de60c47b5797619d2a3aeff97db78e0ee6e38dcfa2129ec: Status 404 returned error can't find the container with id f7187339fff2a3485de60c47b5797619d2a3aeff97db78e0ee6e38dcfa2129ec
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.537941 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: W1125 09:36:35.555610 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-66c002ee2814b2a5f66a6fd3d15046666b218571b4dc76eaee8019201e046914 WatchSource:0}: Error finding container 66c002ee2814b2a5f66a6fd3d15046666b218571b4dc76eaee8019201e046914: Status 404 returned error can't find the container with id 66c002ee2814b2a5f66a6fd3d15046666b218571b4dc76eaee8019201e046914
Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.557805 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="800ms"
Nov 25 09:36:35 crc kubenswrapper[4854]: W1125 09:36:35.740221 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused
Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.740354 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.772135 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.774343 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.774382 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.774396 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.774424 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: E1125 09:36:35.774894 4854 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.184:6443: connect: connection refused" node="crc"
Nov 25 09:36:35 crc kubenswrapper[4854]: I1125 09:36:35.947657 4854 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused
Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.016301 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"42ee95833752973322b26ecec2e3c5ae090eb1437d38c490820c71b8d43d0034"}
Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.017442 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f7187339fff2a3485de60c47b5797619d2a3aeff97db78e0ee6e38dcfa2129ec"}
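[Annotation] The "Attempting to register node" / "Unable to register node" pairs above are the kubelet retrying node registration while the API server endpoint refuses connections. A minimal Go sketch of that retry loop using client-go; the kubeconfig path and the fixed 5s interval are assumptions (the real kubelet backs off):

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// registerNode keeps trying to create the Node object until the API
// server accepts it or the context expires.
func registerNode(ctx context.Context, cs kubernetes.Interface, name string) error {
	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}}
	for {
		_, err := cs.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
		if err == nil {
			return nil
		}
		fmt.Println("Unable to register node with API server, will retry:", err)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second): // fixed interval for the sketch
		}
	}
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	if err := registerNode(ctx, cs, "crc"); err != nil {
		panic(err)
	}
}
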
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f7187339fff2a3485de60c47b5797619d2a3aeff97db78e0ee6e38dcfa2129ec"} Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.018301 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"34ee3b4ca54b611a6a6e985af30fe3760b29ef09369b65d0cccb9297fdd6dad5"} Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.019135 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"fca5c0ca5199c312e62f6867820aae60d52d45ab055636ec60f21b0f483f4866"} Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.020417 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"66c002ee2814b2a5f66a6fd3d15046666b218571b4dc76eaee8019201e046914"} Nov 25 09:36:36 crc kubenswrapper[4854]: W1125 09:36:36.292429 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:36 crc kubenswrapper[4854]: E1125 09:36:36.292589 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:36 crc kubenswrapper[4854]: E1125 09:36:36.359123 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="1.6s" Nov 25 09:36:36 crc kubenswrapper[4854]: W1125 09:36:36.385635 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:36 crc kubenswrapper[4854]: E1125 09:36:36.385736 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:36 crc kubenswrapper[4854]: W1125 09:36:36.528095 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:36 crc kubenswrapper[4854]: E1125 09:36:36.528493 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get 
\"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.575197 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.577027 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.577090 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.577108 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.577140 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:36:36 crc kubenswrapper[4854]: E1125 09:36:36.577698 4854 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.184:6443: connect: connection refused" node="crc" Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.860019 4854 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 25 09:36:36 crc kubenswrapper[4854]: E1125 09:36:36.861484 4854 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:36 crc kubenswrapper[4854]: I1125 09:36:36.946840 4854 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.025536 4854 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="5da35c31b62af5bdf61bdf6129d180f94471966ebc34c2616cf3b47b55c4136f" exitCode=0 Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.025688 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"5da35c31b62af5bdf61bdf6129d180f94471966ebc34c2616cf3b47b55c4136f"} Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.025896 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.027687 4854 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="a95ad1710fb73417560894871bf4a2a02c511595f2ecbddeb38888960b57f955" exitCode=0 Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.027739 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"a95ad1710fb73417560894871bf4a2a02c511595f2ecbddeb38888960b57f955"} Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.027840 4854 
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.027840 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.028795 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.028840 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.028858 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.029204 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.029286 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.029363 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.030313 4854 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab" exitCode=0
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.030413 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab"}
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.030431 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.033002 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.033032 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.033045 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.034037 4854 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c" exitCode=0
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.034127 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.034129 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c"}
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.034969 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.034988 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.034998 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.036314 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.036972 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.037007 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.037016 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.038150 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247"}
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.038364 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863"}
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.038377 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331"}
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.038388 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116"}
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.038595 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.039976 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.040032 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.040055 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:37 crc kubenswrapper[4854]: I1125 09:36:37.947032 4854 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused
Nov 25 09:36:37 crc kubenswrapper[4854]: E1125 09:36:37.960664 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="3.2s"
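[Annotation] The "Failed to ensure lease exists, will retry" errors double their interval across this section (800ms, 1.6s, 3.2s, and later 6.4s): the kubelet's node-lease controller backing off while the API server is down. A minimal Go sketch of that ensure-with-backoff pattern using client-go's coordination API; client construction and the backoff cap are assumptions of the sketch:

package main

import (
	"context"
	"fmt"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// ensureLease gets or creates the node's Lease in kube-node-lease,
// doubling the retry interval on failure as seen in the log.
func ensureLease(ctx context.Context, cs kubernetes.Interface, node string) error {
	interval := 800 * time.Millisecond
	for {
		_, err := cs.CoordinationV1().Leases("kube-node-lease").Get(ctx, node, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			lease := &coordinationv1.Lease{ObjectMeta: metav1.ObjectMeta{Name: node, Namespace: "kube-node-lease"}}
			_, err = cs.CoordinationV1().Leases("kube-node-lease").Create(ctx, lease, metav1.CreateOptions{})
		}
		if err == nil {
			return nil
		}
		fmt.Printf("Failed to ensure lease exists, will retry: %v interval=%s\n", err, interval)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}
		if interval < 7*time.Second { // assumed cap for the sketch
			interval *= 2
		}
	}
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	if err := ensureLease(context.Background(), cs, "crc"); err != nil {
		panic(err)
	}
}
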
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.045852 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.045864 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.045874 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.047482 4854 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="061a8c1896bde9f6ed00a4e97495a807f76686ded2ed3bed7a094df185238da2" exitCode=0 Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.047540 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"061a8c1896bde9f6ed00a4e97495a807f76686ded2ed3bed7a094df185238da2"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.047599 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.048746 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.048789 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.048803 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.051259 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"b5c4f75292620c27c4438230c331ae62d3c130ede9379157f6bc31e2a0345776"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.051358 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.052385 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.052406 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.052415 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.056510 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 
09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.056509 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.056612 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.056627 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e"} Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.056519 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.057430 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.057471 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.057519 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.058022 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.059381 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.059413 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.178613 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.179940 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.179987 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.180005 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.180034 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:36:38 crc kubenswrapper[4854]: E1125 09:36:38.180627 4854 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.184:6443: connect: connection refused" node="crc" Nov 25 09:36:38 crc kubenswrapper[4854]: W1125 09:36:38.264071 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: 
connect: connection refused Nov 25 09:36:38 crc kubenswrapper[4854]: E1125 09:36:38.264146 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:38 crc kubenswrapper[4854]: W1125 09:36:38.550292 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:38 crc kubenswrapper[4854]: E1125 09:36:38.550388 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:38 crc kubenswrapper[4854]: W1125 09:36:38.631835 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:38 crc kubenswrapper[4854]: E1125 09:36:38.631911 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.947460 4854 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:38 crc kubenswrapper[4854]: I1125 09:36:38.958058 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.061205 4854 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="e25e30401cb74467455d88fa8c44eb3a63cb6844edf50c703112e9ab3e7d97a4" exitCode=0 Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.061286 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"e25e30401cb74467455d88fa8c44eb3a63cb6844edf50c703112e9ab3e7d97a4"} Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.061305 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.062041 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.062068 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.062076 4854 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.064280 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1"} Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.064329 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.064384 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.064507 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065267 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065285 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065293 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065300 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065325 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065335 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065705 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065736 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.065748 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:36:39 crc kubenswrapper[4854]: W1125 09:36:39.164125 4854 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.184:6443: connect: connection refused Nov 25 09:36:39 crc kubenswrapper[4854]: E1125 09:36:39.164215 4854 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.184:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.285040 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.761356 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.761506 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.762703 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.762731 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.762740 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:39 crc kubenswrapper[4854]: I1125 09:36:39.769848 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074191 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"17790538cfb9e52cbdad680b3785ef7144794625a35ee365becd06f97a47259e"}
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074237 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6e5057b74c7636c2d27ecdc2399cf9b2e7f7b35bd78d33d6a1eef7fe83899adf"}
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074248 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"18ca83fff043120ae1be5dbbeaa0545a736345596efc34404b80cf80a0985909"}
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074263 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"247efd089108fb7006c8a1e413dfdb20078a32f91bfad045119976dba2d83d06"}
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074319 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b81ebaffe0aabbefc3045b916d17e3168b0542b492d109644a6814bbe0d282d1"}
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074338 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074250 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074409 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074424 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074442 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.074536 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.075986 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076014 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076033 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076062 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076073 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076101 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076111 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076126 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076169 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076190 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076079 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:40 crc kubenswrapper[4854]: I1125 09:36:40.076218 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.076306 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.076367 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.076392 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077423 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077451 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077455 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077460 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077476 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077487 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077515 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077549 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.077560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.226334 4854 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.381085 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.382648 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.382720 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.382730 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.382756 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 09:36:41 crc kubenswrapper[4854]: I1125 09:36:41.909607 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.078868 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.079721 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.079779 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.079791 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.557426 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.557609 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.558708 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.558813 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.558830 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.952475 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.952661 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.953966 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.954017 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:42 crc kubenswrapper[4854]: I1125 09:36:42.954032 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:43 crc kubenswrapper[4854]: I1125 09:36:43.795688 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 25 09:36:43 crc kubenswrapper[4854]: I1125 09:36:43.795881 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:43 crc kubenswrapper[4854]: I1125 09:36:43.797156 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:43 crc kubenswrapper[4854]: I1125 09:36:43.797200 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:43 crc kubenswrapper[4854]: I1125 09:36:43.797210 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:45 crc kubenswrapper[4854]: E1125 09:36:45.076986 4854 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 09:36:46 crc kubenswrapper[4854]: I1125 09:36:46.277494 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:36:46 crc kubenswrapper[4854]: I1125 09:36:46.277705 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:46 crc kubenswrapper[4854]: I1125 09:36:46.278719 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:46 crc kubenswrapper[4854]: I1125 09:36:46.278769 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:46 crc kubenswrapper[4854]: I1125 09:36:46.278787 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:46 crc kubenswrapper[4854]: I1125 09:36:46.281781 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:36:47 crc kubenswrapper[4854]: I1125 09:36:47.092240 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:47 crc kubenswrapper[4854]: I1125 09:36:47.093220 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:47 crc kubenswrapper[4854]: I1125 09:36:47.093266 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:47 crc kubenswrapper[4854]: I1125 09:36:47.093283 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:49 crc kubenswrapper[4854]: I1125 09:36:49.278090 4854 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 09:36:49 crc kubenswrapper[4854]: I1125 09:36:49.278276 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:36:49 crc kubenswrapper[4854]: I1125 09:36:49.285474 4854 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 09:36:49 crc kubenswrapper[4854]: I1125 09:36:49.285567 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:36:49 crc kubenswrapper[4854]: I1125 09:36:49.948106 4854 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.103040 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.105840 4854 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1" exitCode=255
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.105907 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1"}
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.106188 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.107504 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.107557 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.107572 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.108291 4854 scope.go:117] "RemoveContainer" containerID="0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1"
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.545730 4854 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 09:36:50 crc kubenswrapper[4854]: I1125 09:36:50.545795 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 09:36:51 crc kubenswrapper[4854]: I1125 09:36:51.110031 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 09:36:51 crc kubenswrapper[4854]: I1125 09:36:51.111852 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8"}
Nov 25 09:36:51 crc kubenswrapper[4854]: I1125 09:36:51.112005 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:51 crc kubenswrapper[4854]: I1125 09:36:51.113000 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:51 crc kubenswrapper[4854]: I1125 09:36:51.113023 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:51 crc kubenswrapper[4854]: I1125 09:36:51.113031 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:53 crc kubenswrapper[4854]: I1125 09:36:53.852502 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 25 09:36:53 crc kubenswrapper[4854]: I1125 09:36:53.853046 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:53 crc kubenswrapper[4854]: I1125 09:36:53.854265 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:53 crc kubenswrapper[4854]: I1125 09:36:53.854398 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:53 crc kubenswrapper[4854]: I1125 09:36:53.854499 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:53 crc kubenswrapper[4854]: I1125 09:36:53.865219 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.121370 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.122561 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.122608 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.122625 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.291008 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.291134 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.291379 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.292273 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.292297 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.292306 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:54 crc kubenswrapper[4854]: I1125 09:36:54.296402 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:36:55 crc kubenswrapper[4854]: E1125 09:36:55.077142 4854 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.123925 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.124964 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.125004 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.125016 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:36:55 crc kubenswrapper[4854]: E1125 09:36:55.534372 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.535939 4854 trace.go:236] Trace[642756700]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:36:44.441) (total time: 11094ms):
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[642756700]: ---"Objects listed" error: 11094ms (09:36:55.535)
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[642756700]: [11.094570744s] [11.094570744s] END
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.535968 4854 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 25 09:36:55 crc kubenswrapper[4854]: E1125 09:36:55.537519 4854 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.540124 4854 trace.go:236] Trace[599363412]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:36:44.095) (total time: 11444ms):
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[599363412]: ---"Objects listed" error: 11444ms (09:36:55.540)
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[599363412]: [11.444568756s] [11.444568756s] END
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.540175 4854 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.540360 4854 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.540342 4854 trace.go:236] Trace[652678497]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:36:42.103) (total time: 13437ms):
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[652678497]: ---"Objects listed" error: 13437ms (09:36:55.540)
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[652678497]: [13.4371354s] [13.4371354s] END
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.540408 4854 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.543431 4854 trace.go:236] Trace[165996697]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:36:44.184) (total time: 11358ms):
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[165996697]: ---"Objects listed" error: 11358ms (09:36:55.543)
Nov 25 09:36:55 crc kubenswrapper[4854]: Trace[165996697]: [11.358893393s] [11.358893393s] END
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.543462 4854 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.544452 4854 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.561129 4854 csr.go:261] certificate signing request csr-wz88j is approved, waiting to be issued
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.571115 4854 csr.go:257] certificate signing request csr-wz88j is issued
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.942556 4854 apiserver.go:52] "Watching apiserver"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.945090 4854 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.945353 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.945723 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.945808 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.945954 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.945988 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.946082 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:55 crc kubenswrapper[4854]: E1125 09:36:55.946072 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:36:55 crc kubenswrapper[4854]: E1125 09:36:55.946308 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.946531 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:55 crc kubenswrapper[4854]: E1125 09:36:55.946606 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.947434 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.947743 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.947745 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.948480 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.948483 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.948578 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.948582 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.949506 4854 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.951544 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.949775 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.976884 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.987424 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:55 crc kubenswrapper[4854]: I1125 09:36:55.999070 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.013358 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.022911 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.030472 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.038706 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.043867 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.043902 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.043926 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.043952 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.043969 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.043998 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044015 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") 
" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044032 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044048 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044063 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044106 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044127 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044145 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044163 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044180 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044197 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044212 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044249 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044268 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044286 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044303 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044320 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044350 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044378 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044395 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044410 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044429 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044446 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044450 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044464 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044481 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044498 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044514 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044530 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044549 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044569 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: 
\"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044588 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044604 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044638 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044654 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044686 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044702 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044719 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044736 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044754 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044771 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044788 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044807 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044823 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044841 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044859 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044879 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044896 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044913 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044929 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044946 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044966 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044983 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045011 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045030 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045066 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045084 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045101 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045119 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045135 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045152 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045169 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045189 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045205 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045223 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045242 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045260 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045277 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045294 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045312 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045329 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: 
\"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045347 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045364 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045382 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045401 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045418 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045435 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045452 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045472 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045490 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045507 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045524 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045541 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045557 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045575 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045592 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045609 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045697 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045718 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045736 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 
09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045754 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045773 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045791 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045811 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045833 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045850 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045869 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045887 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045903 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045922 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 
09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045939 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045958 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045977 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045994 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046013 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046032 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046054 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046072 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046090 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046109 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: 
\"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046129 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046146 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046163 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046180 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046201 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046220 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046238 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046258 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046278 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046296 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046315 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046332 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046349 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046369 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046387 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046407 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046427 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046445 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046464 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046482 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046501 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046518 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046537 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046558 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046576 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046593 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046611 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046646 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046681 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046700 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046718 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046738 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046760 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.044815 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047787 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045002 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045149 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045226 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047835 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045363 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045501 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045608 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045582 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045597 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045649 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045790 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045814 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.045835 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046030 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046190 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046226 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046255 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046327 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046347 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046450 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046779 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047945 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047970 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047992 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048013 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048032 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048053 4854 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048504 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048529 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048548 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048568 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048588 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048606 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048629 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048652 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048687 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 
09:36:56.048706 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048726 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048744 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048764 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048783 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048809 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048832 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048854 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048876 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048896 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048921 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048944 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048965 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048984 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049002 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049021 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049039 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049058 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049080 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049104 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: 
\"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049125 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049144 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049164 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049184 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049203 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049224 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049247 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049412 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049450 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049476 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049498 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049521 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049572 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049605 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049631 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049660 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049713 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049738 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049767 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049793 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049990 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050017 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050043 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050070 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050096 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050122 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050173 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc 
kubenswrapper[4854]: I1125 09:36:56.050188 4854 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050202 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050215 4854 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050228 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050241 4854 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050257 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050270 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050285 4854 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050298 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050313 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050326 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050340 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050451 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050470 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050482 4854 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050495 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050507 4854 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050520 4854 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050533 4854 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050545 4854 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050558 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050991 4854 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.053326 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.053695 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046564 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.046768 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047079 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047090 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047203 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047291 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047497 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047527 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047630 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047752 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.047770 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048030 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048059 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048135 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). 
InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048364 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.048387 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049585 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.049783 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050016 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050059 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050237 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050267 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050343 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050603 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050652 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050710 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.050728 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.051542 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.052405 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.053619 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.053633 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.053638 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.053808 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.053847 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.053883 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054007 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054347 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054363 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054368 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054418 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054541 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054568 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054807 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054810 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.054998 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.055332 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.055359 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.055503 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.056091 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.056445 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.056742 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.056706 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.056812 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.056924 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057033 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057258 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057295 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057437 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057455 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057586 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057632 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057804 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.057901 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058032 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058280 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058340 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058372 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058416 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058503 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058765 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058775 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.058924 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.059014 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.059041 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.059097 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.065318 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.065445 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.065479 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.065607 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.065973 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.066079 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.066264 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.066481 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.066885 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.067301 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.067523 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.068223 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.068389 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.068436 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.068703 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.068980 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.069901 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.070298 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.070871 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.070896 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.070907 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.070982 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:56.570941031 +0000 UTC m=+22.423934407 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.071032 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.071126 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:36:56.571112565 +0000 UTC m=+22.424106041 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.071242 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.071343 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.071541 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:56.571524677 +0000 UTC m=+22.424518053 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.071920 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:56.571909927 +0000 UTC m=+22.424903303 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.071559 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.071854 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.072323 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.072395 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.075449 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.078000 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.078026 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.078039 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.078043 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.078092 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:56.578074549 +0000 UTC m=+22.431068045 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.078318 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.079269 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.081028 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.081289 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.081576 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.081687 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082148 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082350 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082482 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082555 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082400 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082816 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082941 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082842 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.082906 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). 
InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.083603 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.083628 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.083706 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.083767 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.083797 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.083861 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.084031 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.084722 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.084660 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.085010 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.085241 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.085272 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.085285 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.085341 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.085838 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.085860 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.086039 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.086378 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.086495 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.086687 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.086738 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087043 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087445 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087505 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087580 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087622 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087763 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087768 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087691 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087723 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087890 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.087933 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.088017 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.088069 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.088097 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.088344 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.088558 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.088692 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.088910 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.089278 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.089661 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.090116 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.090691 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091232 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091259 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091322 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091392 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091454 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091774 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091859 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.091936 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.092091 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.092092 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.092169 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.092273 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.092337 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.092682 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.096770 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.099764 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.101497 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.121904 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.124523 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.139624 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.155690 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.155802 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.155863 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.155996 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156004 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156047 4854 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156068 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156084 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156119 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156136 4854 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156148 4854 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156165 4854 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156178 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156190 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156201 4854 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156218 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156230 4854 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156240 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156252 4854 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156267 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156280 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156294 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156307 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156324 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156335 4854 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156346 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156361 4854 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156375 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156387 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156399 4854 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156413 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156425 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156437 4854 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156449 4854 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156465 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: 
\"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156477 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156488 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156504 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156515 4854 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156527 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156538 4854 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156554 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156567 4854 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156579 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156592 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156609 4854 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156622 4854 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156634 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156646 4854 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156661 4854 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156724 4854 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156736 4854 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156752 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156766 4854 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156809 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156821 4854 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156838 4854 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156850 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156863 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156874 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156890 4854 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156901 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156911 4854 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156925 4854 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156941 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156953 4854 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156964 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.156979 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157060 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157075 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157089 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157105 4854 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157117 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157130 4854 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157142 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157192 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157664 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157699 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157715 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157727 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157739 4854 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157751 4854 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157765 4854 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157779 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157792 4854 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157833 4854 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157846 4854 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157860 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157872 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157884 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157897 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157909 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157922 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157933 4854 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157947 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157959 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157972 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157985 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.157997 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158008 4854 reconciler_common.go:293] "Volume 
detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158020 4854 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158034 4854 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158046 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158059 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158070 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158082 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158094 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158106 4854 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158119 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158132 4854 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158145 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158157 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158168 4854 reconciler_common.go:293] "Volume detached for volume 
\"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158180 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158191 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158202 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158214 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158225 4854 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158236 4854 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158248 4854 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158259 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158270 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158281 4854 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158292 4854 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158304 4854 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158315 4854 reconciler_common.go:293] "Volume detached for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158326 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158338 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158349 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158360 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158371 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158382 4854 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158394 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158405 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158417 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158428 4854 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158441 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158453 4854 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158464 4854 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158475 4854 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158486 4854 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158498 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158509 4854 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158520 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158532 4854 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158544 4854 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158556 4854 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158594 4854 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158655 4854 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158686 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158699 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158710 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158720 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158731 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158743 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158756 4854 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158767 4854 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158777 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158789 4854 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158800 4854 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158811 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158823 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158833 4854 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158844 4854 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158855 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: 
\"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158865 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158876 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158887 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158898 4854 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158907 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158918 4854 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158929 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158940 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158952 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158963 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.158974 4854 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.262739 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.271426 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:36:56 crc kubenswrapper[4854]: W1125 09:36:56.275043 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-26929ef7805055baec41e8d3769fdb0f3fa1763ce21d52046cb36b59bd473297 WatchSource:0}: Error finding container 26929ef7805055baec41e8d3769fdb0f3fa1763ce21d52046cb36b59bd473297: Status 404 returned error can't find the container with id 26929ef7805055baec41e8d3769fdb0f3fa1763ce21d52046cb36b59bd473297 Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.278932 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.281179 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:56 crc kubenswrapper[4854]: W1125 09:36:56.284381 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-3cf7934483e1f495d1f6e964453076df0e33a345e9e74e43376b32aa77f59abf WatchSource:0}: Error finding container 3cf7934483e1f495d1f6e964453076df0e33a345e9e74e43376b32aa77f59abf: Status 404 returned error can't find the container with id 3cf7934483e1f495d1f6e964453076df0e33a345e9e74e43376b32aa77f59abf Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.285434 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.291156 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.292357 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.304155 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: W1125 09:36:56.308255 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-c23f468c642c1d13a63656ae8833f1f82770f86a6dfe63e4b04fb8f86261d1db WatchSource:0}: Error finding container c23f468c642c1d13a63656ae8833f1f82770f86a6dfe63e4b04fb8f86261d1db: Status 404 returned error can't find the container with id c23f468c642c1d13a63656ae8833f1f82770f86a6dfe63e4b04fb8f86261d1db Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.318891 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.328967 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.339261 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.353447 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.367105 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.378441 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.389830 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.401729 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.423438 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.445733 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.463151 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.472736 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.484561 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.571953 4854 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-25 09:31:55 +0000 UTC, rotation deadline is 2026-09-25 00:13:33.750806635 +0000 UTC Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.572058 4854 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7286h36m37.17875194s for next certificate rotation Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.662578 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.662685 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.662714 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.662742 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.662770 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.662886 4854 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:36:57.66285758 +0000 UTC m=+23.515850956 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.662905 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.662924 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.662981 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.662896 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.663005 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.663021 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.663021 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.663034 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.662999 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:57.662978603 +0000 UTC m=+23.515972079 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.663093 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:57.663073016 +0000 UTC m=+23.516066482 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.663109 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:57.663101696 +0000 UTC m=+23.516095202 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: E1125 09:36:56.663126 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:57.663118417 +0000 UTC m=+23.516111913 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.931891 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-l4c8x"] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.932220 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.933819 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.934002 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gt7mq"] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.934692 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-vkjjq"] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.934887 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.935792 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-txnt5"] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.937227 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.938525 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.939874 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.940085 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.941932 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-9qdk4"] Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.942613 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.943501 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.944815 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.944843 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945198 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945380 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945497 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945536 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945538 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945629 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945742 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945822 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945893 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.945962 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.946127 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.946357 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.946371 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.946427 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.946975 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.947132 4854 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.951400 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\
",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.964809 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965298 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-os-release\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965339 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-os-release\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965369 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-cni-bin\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965424 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r24xl\" (UniqueName: \"kubernetes.io/projected/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-kube-api-access-r24xl\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965471 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-netns\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: 
I1125 09:36:56.965498 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-netns\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965526 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-proxy-tls\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965558 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/34f44032-4367-4650-b4e0-02aa8d3209ae-cni-binary-copy\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965588 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-cni-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965615 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-systemd-units\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965645 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965693 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovn-node-metrics-cert\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965725 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965756 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-kubelet\") pod \"multus-txnt5\" 
(UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965799 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-hostroot\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965831 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-log-socket\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965855 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-netd\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965921 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkwcs\" (UniqueName: \"kubernetes.io/projected/baab3391-6269-467e-be1c-c992c82ddd7d-kube-api-access-xkwcs\") pod \"node-resolver-l4c8x\" (UID: \"baab3391-6269-467e-be1c-c992c82ddd7d\") " pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.965974 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-system-cni-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966005 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a0e9f759-2eea-43cd-9e0a-6f149785c431-cni-binary-copy\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966035 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whb45\" (UniqueName: \"kubernetes.io/projected/34f44032-4367-4650-b4e0-02aa8d3209ae-kube-api-access-whb45\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966063 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-ovn\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966100 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/34f44032-4367-4650-b4e0-02aa8d3209ae-cni-sysctl-allowlist\") pod 
\"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966122 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-node-log\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966146 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-mcd-auth-proxy-config\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966191 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-cnibin\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966214 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-cni-multus\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966248 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-var-lib-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966274 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966318 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-conf-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966339 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-etc-kubernetes\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966363 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-tbvn7\" (UniqueName: \"kubernetes.io/projected/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-kube-api-access-tbvn7\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966419 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-system-cni-dir\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966460 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-k8s-cni-cncf-io\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966485 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-daemon-config\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966510 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-env-overrides\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966534 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-bin\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966566 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-config\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966587 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-ovn-kubernetes\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966619 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-socket-dir-parent\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc 
kubenswrapper[4854]: I1125 09:36:56.966642 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-kubelet\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966690 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-slash\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966728 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-systemd\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966758 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-rootfs\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966807 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vhlt\" (UniqueName: \"kubernetes.io/projected/a0e9f759-2eea-43cd-9e0a-6f149785c431-kube-api-access-2vhlt\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966829 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-etc-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966858 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-cnibin\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.966931 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/baab3391-6269-467e-be1c-c992c82ddd7d-hosts-file\") pod \"node-resolver-l4c8x\" (UID: \"baab3391-6269-467e-be1c-c992c82ddd7d\") " pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.967019 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-multus-certs\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " 
pod="openshift-multus/multus-txnt5" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.967052 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-script-lib\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.976454 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.985645 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:56 crc kubenswrapper[4854]: I1125 09:36:56.993399 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.004699 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs
\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.013065 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.013191 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.014980 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.016532 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.017267 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.018405 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.019018 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.019926 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.020389 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.020972 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.021895 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.022512 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.023367 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.023834 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.024832 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.025278 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.025769 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.026600 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.027207 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.028240 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.028599 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.028735 4854 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.029155 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.030188 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.030631 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.031576 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.032019 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.034460 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.035019 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.037550 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.041617 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.042575 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.043098 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.044119 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.044831 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.046083 4854 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.046224 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.048342 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.049737 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.049805 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.050473 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.052505 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.053774 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.054462 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.055657 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 
09:36:57.056511 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.057085 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.058344 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.059528 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.060239 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.061273 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.061934 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.062857 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.063599 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.064116 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.064404 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.064906 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.065429 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.066455 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 
09:36:57.067151 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067329 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r24xl\" (UniqueName: \"kubernetes.io/projected/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-kube-api-access-r24xl\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067356 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-netns\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067371 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-proxy-tls\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067386 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/34f44032-4367-4650-b4e0-02aa8d3209ae-cni-binary-copy\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067402 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-cni-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067415 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-netns\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067427 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067442 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovn-node-metrics-cert\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067456 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067471 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-kubelet\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067487 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-hostroot\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067500 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-systemd-units\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067513 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-netd\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067531 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkwcs\" (UniqueName: \"kubernetes.io/projected/baab3391-6269-467e-be1c-c992c82ddd7d-kube-api-access-xkwcs\") pod \"node-resolver-l4c8x\" (UID: \"baab3391-6269-467e-be1c-c992c82ddd7d\") " pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067546 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-system-cni-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067558 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a0e9f759-2eea-43cd-9e0a-6f149785c431-cni-binary-copy\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067571 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-log-socket\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067610 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-ovn\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067624 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/34f44032-4367-4650-b4e0-02aa8d3209ae-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067640 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whb45\" (UniqueName: \"kubernetes.io/projected/34f44032-4367-4650-b4e0-02aa8d3209ae-kube-api-access-whb45\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067690 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-mcd-auth-proxy-config\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067731 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-cnibin\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067760 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-cni-multus\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067766 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-kubelet\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067779 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-var-lib-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067798 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-node-log\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067862 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-conf-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 
25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067887 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067940 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbvn7\" (UniqueName: \"kubernetes.io/projected/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-kube-api-access-tbvn7\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067964 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-system-cni-dir\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.067987 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-k8s-cni-cncf-io\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068010 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-daemon-config\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068031 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-etc-kubernetes\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068039 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-netns\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068053 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-env-overrides\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068079 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-bin\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc 
kubenswrapper[4854]: I1125 09:36:57.068101 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-config\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068121 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-ovn-kubernetes\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068151 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-socket-dir-parent\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068170 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-kubelet\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068189 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-slash\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068209 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-systemd\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068214 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068247 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-rootfs\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068270 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-etc-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068292 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-cnibin\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068312 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/baab3391-6269-467e-be1c-c992c82ddd7d-hosts-file\") pod \"node-resolver-l4c8x\" (UID: \"baab3391-6269-467e-be1c-c992c82ddd7d\") " pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068331 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vhlt\" (UniqueName: \"kubernetes.io/projected/a0e9f759-2eea-43cd-9e0a-6f149785c431-kube-api-access-2vhlt\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068353 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-multus-certs\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068373 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-script-lib\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068400 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-os-release\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068417 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-os-release\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068438 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-cni-bin\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068495 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-cni-bin\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068527 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-hostroot\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " 
pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068552 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-systemd-units\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068577 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-netd\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068777 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-system-cni-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.068932 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-netns\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.069360 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a0e9f759-2eea-43cd-9e0a-6f149785c431-cni-binary-copy\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.069405 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-log-socket\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.069431 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-ovn\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.069587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/34f44032-4367-4650-b4e0-02aa8d3209ae-cni-binary-copy\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.069869 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/34f44032-4367-4650-b4e0-02aa8d3209ae-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.069931 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-cni-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070542 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-mcd-auth-proxy-config\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070591 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-cnibin\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070615 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-var-lib-cni-multus\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070636 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-var-lib-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070659 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-node-log\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070712 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070743 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-socket-dir-parent\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070761 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-conf-dir\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070780 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gt7mq\" (UID: 
\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070913 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-system-cni-dir\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.070933 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-k8s-cni-cncf-io\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071396 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a0e9f759-2eea-43cd-9e0a-6f149785c431-multus-daemon-config\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071425 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-etc-kubernetes\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071722 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-kubelet\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071759 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-slash\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071784 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-systemd\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071789 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-proxy-tls\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071808 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-rootfs\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071832 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-etc-openvswitch\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071858 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-cnibin\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.071895 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/baab3391-6269-467e-be1c-c992c82ddd7d-hosts-file\") pod \"node-resolver-l4c8x\" (UID: \"baab3391-6269-467e-be1c-c992c82ddd7d\") " pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.072031 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-env-overrides\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.072072 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-bin\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.072145 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-host-run-multus-certs\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.072474 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-config\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.072514 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-ovn-kubernetes\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.072553 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-os-release\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.072585 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/a0e9f759-2eea-43cd-9e0a-6f149785c431-os-release\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.073147 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-script-lib\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.073805 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovn-node-metrics-cert\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.078255 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/34f44032-4367-4650-b4e0-02aa8d3209ae-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.080252 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.085512 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbvn7\" (UniqueName: \"kubernetes.io/projected/aa43fdf8-0726-4b6e-bbda-2ac604e9eee0-kube-api-access-tbvn7\") pod \"machine-config-daemon-9qdk4\" (UID: \"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\") " pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.085594 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r24xl\" (UniqueName: \"kubernetes.io/projected/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-kube-api-access-r24xl\") pod \"ovnkube-node-gt7mq\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.086348 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkwcs\" (UniqueName: \"kubernetes.io/projected/baab3391-6269-467e-be1c-c992c82ddd7d-kube-api-access-xkwcs\") pod \"node-resolver-l4c8x\" (UID: \"baab3391-6269-467e-be1c-c992c82ddd7d\") " pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.087305 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vhlt\" (UniqueName: \"kubernetes.io/projected/a0e9f759-2eea-43cd-9e0a-6f149785c431-kube-api-access-2vhlt\") pod \"multus-txnt5\" (UID: \"a0e9f759-2eea-43cd-9e0a-6f149785c431\") " pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.087355 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whb45\" (UniqueName: \"kubernetes.io/projected/34f44032-4367-4650-b4e0-02aa8d3209ae-kube-api-access-whb45\") pod \"multus-additional-cni-plugins-vkjjq\" (UID: \"34f44032-4367-4650-b4e0-02aa8d3209ae\") " pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.091189 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.101691 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.108621 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.116694 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.124652 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.132045 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724"} Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.132117 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"26929ef7805055baec41e8d3769fdb0f3fa1763ce21d52046cb36b59bd473297"} Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.133612 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c23f468c642c1d13a63656ae8833f1f82770f86a6dfe63e4b04fb8f86261d1db"} Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.133715 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.136348 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c"} Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.136393 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c"} Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.136408 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3cf7934483e1f495d1f6e964453076df0e33a345e9e74e43376b32aa77f59abf"} Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.144595 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.156311 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.168025 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.180413 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.189631 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.199319 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.218626 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.227952 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-cer
ts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.240736 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.248078 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.255209 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-l4c8x" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.264384 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.264718 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:36:57 crc kubenswrapper[4854]: W1125 09:36:57.271222 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbaab3391_6269_467e_be1c_c992c82ddd7d.slice/crio-71f9a2b8f1d3c96fc5ce1a1ee2f5e111001bbf8909daabb38df0fe6f095178d3 WatchSource:0}: Error finding container 71f9a2b8f1d3c96fc5ce1a1ee2f5e111001bbf8909daabb38df0fe6f095178d3: Status 404 returned error can't find the container with id 71f9a2b8f1d3c96fc5ce1a1ee2f5e111001bbf8909daabb38df0fe6f095178d3 Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.273150 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.274867 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.282513 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-txnt5" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.284163 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.288724 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.296716 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: W1125 09:36:57.305902 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34f44032_4367_4650_b4e0_02aa8d3209ae.slice/crio-8e11742baf31f909c5d08e96617ff848cc4dcea432e7839acf74f8cbe4d3690a WatchSource:0}: Error finding container 8e11742baf31f909c5d08e96617ff848cc4dcea432e7839acf74f8cbe4d3690a: Status 404 returned error can't find the container with id 8e11742baf31f909c5d08e96617ff848cc4dcea432e7839acf74f8cbe4d3690a Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.307714 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.321585 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.332315 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.673200 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673341 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:36:59.673315407 +0000 UTC m=+25.526308783 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.673388 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.673434 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.673465 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:57 crc kubenswrapper[4854]: I1125 09:36:57.673484 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673553 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673565 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673584 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673597 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673618 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:59.673599745 +0000 UTC m=+25.526593191 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673638 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:59.673629585 +0000 UTC m=+25.526622961 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673622 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673656 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673659 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673691 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673707 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:59.673696637 +0000 UTC m=+25.526690013 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:36:57 crc kubenswrapper[4854]: E1125 09:36:57.673724 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:36:59.673715678 +0000 UTC m=+25.526709054 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.012388 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.012476 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:58 crc kubenswrapper[4854]: E1125 09:36:58.012809 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:36:58 crc kubenswrapper[4854]: E1125 09:36:58.012942 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.140719 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-txnt5" event={"ID":"a0e9f759-2eea-43cd-9e0a-6f149785c431","Type":"ContainerStarted","Data":"703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.140790 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-txnt5" event={"ID":"a0e9f759-2eea-43cd-9e0a-6f149785c431","Type":"ContainerStarted","Data":"db3adb51529b5cb97b565aec45ad98f036c9cdaf84e6b6415e1e0a4852341cfd"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.144504 4854 generic.go:334] "Generic (PLEG): container finished" podID="34f44032-4367-4650-b4e0-02aa8d3209ae" containerID="3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8" exitCode=0 Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.144586 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerDied","Data":"3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.144637 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerStarted","Data":"8e11742baf31f909c5d08e96617ff848cc4dcea432e7839acf74f8cbe4d3690a"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.145806 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1" exitCode=0 Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.145855 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.145872 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"bba53019997f81293d66cc26f16c72a727dc367f98f0bc49aa65c04a9d480932"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.147460 4854 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-l4c8x" event={"ID":"baab3391-6269-467e-be1c-c992c82ddd7d","Type":"ContainerStarted","Data":"caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.147504 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-l4c8x" event={"ID":"baab3391-6269-467e-be1c-c992c82ddd7d","Type":"ContainerStarted","Data":"71f9a2b8f1d3c96fc5ce1a1ee2f5e111001bbf8909daabb38df0fe6f095178d3"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.149366 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.149399 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.149413 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"4a67f90f052e7bc25db32b948231540143b25caab4f16e2efc7adfc708966024"} Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.157334 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.171934 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.184357 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.197505 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.211015 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.225547 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.237616 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.252154 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.262777 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.281573 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.296422 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resou
rces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.309102 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.320993 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.333891 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.347258 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.357908 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.369666 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.385415 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.397944 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc 
kubenswrapper[4854]: I1125 09:36:58.413152 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.425288 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.437074 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.455532 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.468574 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de25971
26bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.481428 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:58 crc kubenswrapper[4854]: I1125 09:36:58.491904 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.013106 4854 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.013258 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.142405 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-lf42b"]
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.142735 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-lf42b"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.147235 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.157335 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.157383 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.157557 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.158772 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.160570 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerStarted","Data":"c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.163734 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.163768 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.163778 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.163789 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.163797 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.163805 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755"}
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.174959 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.187225 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.193619 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9zdw\" (UniqueName: \"kubernetes.io/projected/e0358b43-024d-430b-b886-c3ba51fb479e-kube-api-access-r9zdw\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.193708 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e0358b43-024d-430b-b886-c3ba51fb479e-serviceca\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.193773 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e0358b43-024d-430b-b886-c3ba51fb479e-host\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b"
Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.201475 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.216430 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.227733 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.240174 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.257532 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.271908 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.285989 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.294784 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9zdw\" (UniqueName: \"kubernetes.io/projected/e0358b43-024d-430b-b886-c3ba51fb479e-kube-api-access-r9zdw\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.294848 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e0358b43-024d-430b-b886-c3ba51fb479e-serviceca\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.294871 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e0358b43-024d-430b-b886-c3ba51fb479e-host\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.294942 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e0358b43-024d-430b-b886-c3ba51fb479e-host\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.295832 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e0358b43-024d-430b-b886-c3ba51fb479e-serviceca\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.297238 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.309852 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.311656 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9zdw\" (UniqueName: \"kubernetes.io/projected/e0358b43-024d-430b-b886-c3ba51fb479e-kube-api-access-r9zdw\") pod \"node-ca-lf42b\" (UID: \"e0358b43-024d-430b-b886-c3ba51fb479e\") " pod="openshift-image-registry/node-ca-lf42b" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.322199 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.333144 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.342748 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.438856 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77
3257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev
/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\
\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.457057 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-lf42b" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.458600 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.491610 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.507006 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.520241 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.535239 4854 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044db
a09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.550135 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.562312 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.580978 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.595414 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.611592 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.626520 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.639952 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.657599 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:36:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.711779 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.711913 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.711980 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:37:03.711956492 +0000 UTC m=+29.564949868 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.712046 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712060 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.712076 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:36:59 crc kubenswrapper[4854]: I1125 09:36:59.712129 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712083 4854 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712198 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712206 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712213 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712230 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:03.71222316 +0000 UTC m=+29.565216606 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712237 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712249 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712283 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:03.712264112 +0000 UTC m=+29.565257488 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712115 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712331 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:03.712318053 +0000 UTC m=+29.565311429 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:36:59 crc kubenswrapper[4854]: E1125 09:36:59.712346 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:03.712339754 +0000 UTC m=+29.565333130 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.012429 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.012514 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:00 crc kubenswrapper[4854]: E1125 09:37:00.012570 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:00 crc kubenswrapper[4854]: E1125 09:37:00.012772 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.168993 4854 generic.go:334] "Generic (PLEG): container finished" podID="34f44032-4367-4650-b4e0-02aa8d3209ae" containerID="c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b" exitCode=0 Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.169068 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerDied","Data":"c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b"} Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.170258 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-lf42b" event={"ID":"e0358b43-024d-430b-b886-c3ba51fb479e","Type":"ContainerStarted","Data":"87eda457a770951b297e3cf8ac671dea47a1dcf9c8e6fc8e3dba0c8bb2564aad"} Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.186048 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.197788 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.209083 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.221511 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.235684 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.251350 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.268840 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\
"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.278463 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.290214 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.302535 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.322337 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.333540 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de25971
26bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.367314 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:00 crc kubenswrapper[4854]: I1125 09:37:00.383087 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.012434 4854 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:01 crc kubenswrapper[4854]: E1125 09:37:01.012565 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.174615 4854 generic.go:334] "Generic (PLEG): container finished" podID="34f44032-4367-4650-b4e0-02aa8d3209ae" containerID="2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998" exitCode=0 Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.174777 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerDied","Data":"2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998"} Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.175985 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-lf42b" event={"ID":"e0358b43-024d-430b-b886-c3ba51fb479e","Type":"ContainerStarted","Data":"77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104"} Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.199413 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\
\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.216456 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.229416 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.247603 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.271417 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.283179 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.296035 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.308232 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.318154 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.332551 4854 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044db
a09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.345075 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.356408 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.369971 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.380514 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.391282 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.400167 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.410697 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.420556 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.434518 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.449074 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"
/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.460155 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.471442 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.483088 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.500372 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.508631 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.519209 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fa
c117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.533169 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.547760 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.938121 4854 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.940568 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.940645 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.940721 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.940886 4854 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.954498 4854 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.954795 4854 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.955933 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.955971 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.955981 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.956010 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.956021 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:01Z","lastTransitionTime":"2025-11-25T09:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:01 crc kubenswrapper[4854]: E1125 09:37:01.977307 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.983585 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.983728 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.983743 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.983765 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:01 crc kubenswrapper[4854]: I1125 09:37:01.983778 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:01Z","lastTransitionTime":"2025-11-25T09:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:01 crc kubenswrapper[4854]: E1125 09:37:01.997324 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:01Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.000924 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.000984 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.001002 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.001027 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.001044 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.012942 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:02 crc kubenswrapper[4854]: E1125 09:37:02.013188 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.012950 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:02 crc kubenswrapper[4854]: E1125 09:37:02.013332 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:02 crc kubenswrapper[4854]: E1125 09:37:02.013825 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.021577 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.021603 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.021661 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.021688 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.021696 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.039761 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.039806 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.039816 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.039832 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.039841 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:02 crc kubenswrapper[4854]: E1125 09:37:02.059319 4854 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.060936 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.060973 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.060984 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.061003 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.061035 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.164064 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.164105 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.164116 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.164131 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.164143 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.182708 4854 generic.go:334] "Generic (PLEG): container finished" podID="34f44032-4367-4650-b4e0-02aa8d3209ae" containerID="aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b" exitCode=0 Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.182798 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerDied","Data":"aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.188564 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.198077 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mo
untPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.227352 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.242195 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.257423 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.266697 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.266736 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.266750 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.266768 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.266780 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.271308 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.281664 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.293553 4854 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044db
a09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.305772 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.316107 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.330921 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.344195 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.357179 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.368785 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.369207 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.369313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.369376 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.369453 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.369513 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.382917 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.472173 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.472221 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.472236 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.472256 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.472268 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.575512 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.575561 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.575572 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.575588 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.575600 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.678417 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.678453 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.678462 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.678475 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.678485 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.780988 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.781028 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.781040 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.781058 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.781074 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.883086 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.883138 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.883151 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.883171 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.883193 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.985624 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.985655 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.985664 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.985691 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:02 crc kubenswrapper[4854]: I1125 09:37:02.985701 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:02Z","lastTransitionTime":"2025-11-25T09:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.013452 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.013649 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.087369 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.087409 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.087417 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.087432 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.087442 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.191186 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.191234 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.191244 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.191261 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.191272 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.195308 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerStarted","Data":"71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.215572 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.237562 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.253251 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"na
me\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.265047 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.280342 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.293487 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.293543 4854 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.293565 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.293597 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.293635 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.299581 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-ide
ntity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.325754 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.337926 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.349991 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.361854 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.373990 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.387321 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.396200 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc 
kubenswrapper[4854]: I1125 09:37:03.396258 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.396285 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.396313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.396335 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.398591 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.418123 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.499037 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.499084 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.499098 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.499118 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.499133 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.602804 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.603098 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.603116 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.603137 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.603152 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.705796 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.705836 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.705845 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.705858 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.705869 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.785551 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785718 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:37:11.785646576 +0000 UTC m=+37.638639962 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.785771 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.785812 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.785843 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.785871 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785932 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785969 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785985 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785981 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785998 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:11.785985636 +0000 UTC m=+37.638979012 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785992 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.786085 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:11.786064328 +0000 UTC m=+37.639057714 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.785997 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.786155 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:11.78613986 +0000 UTC m=+37.639133246 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.786154 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.786186 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:03 crc kubenswrapper[4854]: E1125 09:37:03.786268 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:11.786246723 +0000 UTC m=+37.639240109 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.808833 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.808883 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.808894 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.808913 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.808924 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.910856 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.910904 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.910915 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.910933 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:03 crc kubenswrapper[4854]: I1125 09:37:03.910942 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:03Z","lastTransitionTime":"2025-11-25T09:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.012491 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.012579 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:04 crc kubenswrapper[4854]: E1125 09:37:04.012610 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:04 crc kubenswrapper[4854]: E1125 09:37:04.012812 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.017495 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.017548 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.017560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.017579 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.017592 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.120354 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.120402 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.120414 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.120431 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.120441 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.200359 4854 generic.go:334] "Generic (PLEG): container finished" podID="34f44032-4367-4650-b4e0-02aa8d3209ae" containerID="71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60" exitCode=0 Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.200415 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerDied","Data":"71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.204304 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.204585 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.221460 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.222205 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.222236 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.222247 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.222263 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.222274 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.233379 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.249285 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.253322 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.261307 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.272575 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.284829 4854 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044db
a09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.295971 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.306433 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.318047 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.324466 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.324496 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.324505 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.324520 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.324529 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.329103 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.339822 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.350290 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.362983 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.377824 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.389510 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.401386 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.414841 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.426363 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.426803 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.426890 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.426949 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.427010 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.427075 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.438598 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb
45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.450458 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.463735 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.477773 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.489847 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.506799 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.519660 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.529379 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.529412 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.529421 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.529435 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.529445 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.532320 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.549940 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.563438 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.631900 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.632310 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.632436 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.632597 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.632809 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.735566 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.735806 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.735815 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.735829 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.735839 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.793047 4854 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.838711 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.838742 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.838751 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.838764 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.838772 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.940929 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.941005 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.941018 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.941035 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:04 crc kubenswrapper[4854]: I1125 09:37:04.941420 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:04Z","lastTransitionTime":"2025-11-25T09:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.013741 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:05 crc kubenswrapper[4854]: E1125 09:37:05.013864 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.028832 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.041460 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.045225 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.045252 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.045260 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.045273 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.045284 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.054360 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.069897 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.085744 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.114262 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.128139 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.140470 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.152473 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.152518 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.152528 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.152544 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.152556 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.154981 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb
45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.166887 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.180702 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.193060 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.209476 4854 generic.go:334] "Generic (PLEG): container finished" podID="34f44032-4367-4650-b4e0-02aa8d3209ae" containerID="f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2" exitCode=0 Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.209663 4854 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.209818 4854 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerDied","Data":"f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.210405 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.215419 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-l
ib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc
-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.225775 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.232334 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.246552 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.254940 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.255145 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.255212 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.255273 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.255349 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.259145 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.276952 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.287430 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.300087 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.315098 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.327237 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.344780 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.355242 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.357701 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.357731 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.357742 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.357759 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.357770 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.364636 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.375465 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.386354 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.398972 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.411114 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.422039 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.432073 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.441874 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.456549 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.460085 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.460113 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.460125 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.460140 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.460152 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.467231 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.476160 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.488270 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.501380 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.514797 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.527382 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.539426 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.550994 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.562127 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.562418 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.562502 4854 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.562585 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.562661 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.577350 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6
620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.586646 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.664853 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.664891 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.664900 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.664912 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.664922 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.767426 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.767500 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.767515 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.767532 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.767544 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.869915 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.870213 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.870313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.870402 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.870481 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.972989 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.973034 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.973045 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.973059 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:05 crc kubenswrapper[4854]: I1125 09:37:05.973067 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:05Z","lastTransitionTime":"2025-11-25T09:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.012665 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:06 crc kubenswrapper[4854]: E1125 09:37:06.012869 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.012698 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:06 crc kubenswrapper[4854]: E1125 09:37:06.013199 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.075902 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.076194 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.076256 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.076326 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.076391 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.178984 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.179308 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.179460 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.179629 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.179801 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.216950 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" event={"ID":"34f44032-4367-4650-b4e0-02aa8d3209ae","Type":"ContainerStarted","Data":"7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.216980 4854 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.231389 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-clu
ster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.244490 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.256688 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.272592 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.282559 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.282598 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.282610 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.282626 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.282640 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.286416 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.297210 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.314277 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.325698 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.352494 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.364518 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.376008 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.384526 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.384566 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.384578 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.384595 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.384607 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.386213 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.403036 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174
f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvsw
itch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.415388 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.486478 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.486536 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.486549 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.486566 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.486602 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.589192 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.589442 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.589504 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.589598 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.589657 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.692364 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.692614 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.692711 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.692802 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.692892 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.795612 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.795941 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.795955 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.795972 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.795984 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.897594 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.897633 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.897642 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.897659 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.897681 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.999806 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.999873 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:06 crc kubenswrapper[4854]: I1125 09:37:06.999896 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:06.999929 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:06.999953 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:06Z","lastTransitionTime":"2025-11-25T09:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.013146 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:07 crc kubenswrapper[4854]: E1125 09:37:07.013359 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.102080 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.102126 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.102137 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.102152 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.102163 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.204218 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.204277 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.204293 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.204317 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.204337 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.220014 4854 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.270600 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.285136 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.297279 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.306313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.306350 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.306359 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.306374 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.306386 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.315253 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.331970 4854 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.344791 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.356599 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.373162 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.381235 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.394161 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.407564 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z"
Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.408295 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.408349 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.408362 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.408380 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.408392 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.418290 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.430365 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.440446 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.451596 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:07Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.511094 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.511159 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.511170 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.511186 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.511196 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.613689 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.613845 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.613960 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.614038 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.614111 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.716852 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.717098 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.717213 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.717308 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.717380 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.819759 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.820032 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.820100 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.820159 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.820226 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.923033 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.923083 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.923096 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.923113 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:07 crc kubenswrapper[4854]: I1125 09:37:07.923143 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:07Z","lastTransitionTime":"2025-11-25T09:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.012523 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.012564 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:08 crc kubenswrapper[4854]: E1125 09:37:08.012647 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:08 crc kubenswrapper[4854]: E1125 09:37:08.012850 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.025618 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.025890 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.025967 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.026028 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.026090 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.129577 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.129634 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.129652 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.129702 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.129720 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.224312 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/0.log" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.228469 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a" exitCode=1 Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.228608 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.230987 4854 scope.go:117] "RemoveContainer" containerID="6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.232993 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.233033 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.233046 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.233066 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.233079 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.244968 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.260623 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.271845 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.287446 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.300401 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.312843 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.324064 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.335709 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.335744 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.335770 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.335786 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.335794 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.340273 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.352710 4854 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.363373 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.374357 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.384096 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.400556 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571756 6150 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571965 6150 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572043 6150 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572333 6150 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:37:07.572349 6150 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 09:37:07.572383 6150 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:37:07.572391 6150 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:37:07.572409 6150 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:37:07.572430 6150 factory.go:656] Stopping watch factory\\\\nI1125 09:37:07.572446 6150 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:37:07.572488 6150 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:37:07.572490 6150 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd
47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.408973 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"
192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.438488 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.438527 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.438538 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.438557 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.438570 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.541621 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.541755 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.541787 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.541817 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.541839 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.644959 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.644999 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.645011 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.645026 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.645040 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.748200 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.748245 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.748257 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.748276 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.748290 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.851222 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.851277 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.851292 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.851310 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.851321 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.953898 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.953957 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.953974 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.954021 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:08 crc kubenswrapper[4854]: I1125 09:37:08.954039 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:08Z","lastTransitionTime":"2025-11-25T09:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.013364 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:09 crc kubenswrapper[4854]: E1125 09:37:09.013496 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.056722 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.056753 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.056761 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.056775 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.056784 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.159297 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.159345 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.159358 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.159377 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.159388 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.261219 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.261269 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.261282 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.261303 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.261317 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.363209 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.363266 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.363281 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.363302 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.363316 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.466356 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.466404 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.466418 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.466437 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.466450 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.511236 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22"] Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.511722 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.515298 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.515422 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.529244 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.545222 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.561255 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.568369 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.568401 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.568413 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.568433 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.568446 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.584899 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.609260 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571756 6150 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571965 6150 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572043 6150 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572333 6150 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:37:07.572349 6150 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 09:37:07.572383 6150 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:37:07.572391 6150 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:37:07.572409 6150 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:37:07.572430 6150 factory.go:656] Stopping watch factory\\\\nI1125 09:37:07.572446 6150 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:37:07.572488 6150 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:37:07.572490 6150 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd
47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.623295 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d3472
0243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.633955 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.642778 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.648335 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9v8p\" (UniqueName: \"kubernetes.io/projected/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-kube-api-access-t9v8p\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.648381 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.648418 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.648470 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.659054 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.669463 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.671004 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.671032 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.671041 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.671058 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.671081 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.679621 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc 
kubenswrapper[4854]: I1125 09:37:09.690915 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.702619 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.714267 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.725151 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:09Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.749642 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.750012 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9v8p\" (UniqueName: \"kubernetes.io/projected/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-kube-api-access-t9v8p\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.750120 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.750230 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.750574 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.750716 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.756503 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.769790 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9v8p\" (UniqueName: \"kubernetes.io/projected/7cb2c023-6f3b-4c24-a49d-1f4686b5eca5-kube-api-access-t9v8p\") pod 
\"ovnkube-control-plane-749d76644c-7zd22\" (UID: \"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.772865 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.772901 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.772910 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.772924 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.772932 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.832924 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" Nov 25 09:37:09 crc kubenswrapper[4854]: W1125 09:37:09.848125 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cb2c023_6f3b_4c24_a49d_1f4686b5eca5.slice/crio-b9e77ed6001cef2e0d4063df2c618ade7c2d3ca0dd49cde5fe9cbe8248d7f7ca WatchSource:0}: Error finding container b9e77ed6001cef2e0d4063df2c618ade7c2d3ca0dd49cde5fe9cbe8248d7f7ca: Status 404 returned error can't find the container with id b9e77ed6001cef2e0d4063df2c618ade7c2d3ca0dd49cde5fe9cbe8248d7f7ca Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.878780 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.878813 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.878822 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.878836 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.878845 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.981973 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.982042 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.982066 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.982096 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:09 crc kubenswrapper[4854]: I1125 09:37:09.982116 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.013355 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.013363 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:10 crc kubenswrapper[4854]: E1125 09:37:10.013533 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:10 crc kubenswrapper[4854]: E1125 09:37:10.013620 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
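The repeated setters.go "Node became not ready" entries above embed the node condition as a JSON object after condition=. A minimal sketch for pulling that payload out of such lines, assuming Go and only the standard library (this parser is illustrative tooling for reading the excerpt, not part of the kubelet or of this log):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// nodeCondition mirrors the fields visible in the condition={...} payloads above.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Shortened sample line from the log above.
	line := `I1125 09:37:09.772932 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:09Z","lastTransitionTime":"2025-11-25T09:37:09Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`

	// Everything after "condition=" is a plain JSON object.
	_, payload, ok := strings.Cut(line, "condition=")
	if !ok {
		return
	}
	var c nodeCondition
	if err := json.Unmarshal([]byte(payload), &c); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%s=%s reason=%s at %s\n", c.Type, c.Status, c.Reason, c.LastTransitionTime)
}

Run against any of the heartbeat entries in this window it prints Ready=False reason=KubeletNotReady, which is why the same five node events (NodeHasSufficientMemory through NodeNotReady) repeat on every status sync below.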
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.084141 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.084186 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.084196 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.084213 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.084224 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.187315 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.187354 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.187362 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.187375 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.187385 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.235259 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" event={"ID":"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5","Type":"ContainerStarted","Data":"b9e77ed6001cef2e0d4063df2c618ade7c2d3ca0dd49cde5fe9cbe8248d7f7ca"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.237265 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/0.log" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.240139 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.294993 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.295028 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.295039 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.295052 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.295060 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.397508 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.397540 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.397550 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.397576 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.397588 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.499730 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.499758 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.499766 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.499777 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.499785 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.601703 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.601738 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.601747 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.601760 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.601770 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.704052 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.704116 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.704134 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.704158 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.704172 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.807121 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.807190 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.807211 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.807235 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.807253 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.910062 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.910101 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.910112 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.910127 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:10 crc kubenswrapper[4854]: I1125 09:37:10.910138 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:10Z","lastTransitionTime":"2025-11-25T09:37:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.012489 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.012732 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
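Every "Error syncing pod, skipping" entry in this stretch traces back to one condition: no CNI configuration file in /etc/kubernetes/cni/net.d/. A rough approximation of that readiness probe, assuming Go; the glob patterns and the standalone check are assumptions for illustration, not the kubelet's actual implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether any plausible CNI config file exists in dir.
func cniConfigPresent(dir string) bool {
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(dir, pat))
		if err == nil && len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the log messages
	if !cniConfigPresent(dir) {
		fmt.Printf("NetworkReady=false: no CNI configuration file in %s\n", dir)
		os.Exit(1)
	}
	fmt.Println("NetworkReady=true")
}

Until the ovnkube-node pod (started above as container 297b49165ae8...) writes its config into that directory, the kubelet keeps reporting NetworkPluginNotReady and skips syncing any pod that needs a pod network.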
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.013438 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.013477 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.013486 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.013501 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.013510 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.017662 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-rbb99"] Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.018071 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.018120 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.035468 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.047739 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.057109 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.069226 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.079808 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.091152 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.105497 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.115681 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.115739 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.115753 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.115770 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.116124 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.117512 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.129374 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.144618 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.162891 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.162986 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr9p6\" (UniqueName: \"kubernetes.io/projected/377b0f2c-4152-40db-a4c1-be3126061d7e-kube-api-access-rr9p6\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.165398 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metri
cs-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571756 6150 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571965 6150 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572043 6150 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572333 6150 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:37:07.572349 6150 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 09:37:07.572383 6150 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:37:07.572391 6150 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:37:07.572409 6150 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:37:07.572430 6150 factory.go:656] Stopping watch factory\\\\nI1125 09:37:07.572446 6150 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:37:07.572488 6150 handler.go:208] Removed *v1.Pod event 
handler 3\\\\nI1125 09:37:07.572490 6150 metrics.go:553] Stopping metrics server at address \\\\\\\"127.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0
a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.180779 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.194916 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.211569 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.219254 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.219316 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.219330 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.219351 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.219371 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.227470 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.240517 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.244870 4854 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/1.log" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.245448 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/0.log" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.248016 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef" exitCode=1 Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.248057 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.248688 4854 scope.go:117] "RemoveContainer" containerID="6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.248757 4854 scope.go:117] "RemoveContainer" containerID="297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef" Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.248941 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.250858 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" event={"ID":"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5","Type":"ContainerStarted","Data":"c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.250890 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" event={"ID":"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5","Type":"ContainerStarted","Data":"defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.264123 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.264268 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.264333 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr9p6\" (UniqueName: \"kubernetes.io/projected/377b0f2c-4152-40db-a4c1-be3126061d7e-kube-api-access-rr9p6\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.264452 4854 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.265519 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs podName:377b0f2c-4152-40db-a4c1-be3126061d7e nodeName:}" failed. No retries permitted until 2025-11-25 09:37:11.765431439 +0000 UTC m=+37.618424815 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs") pod "network-metrics-daemon-rbb99" (UID: "377b0f2c-4152-40db-a4c1-be3126061d7e") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.279166 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.280665 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr9p6\" (UniqueName: \"kubernetes.io/projected/377b0f2c-4152-40db-a4c1-be3126061d7e-kube-api-access-rr9p6\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.292045 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.309925 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.321793 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.321825 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.321834 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.321847 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.321858 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.330493 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571756 6150 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571965 6150 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572043 6150 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572333 6150 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:37:07.572349 6150 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 09:37:07.572383 6150 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:37:07.572391 6150 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:37:07.572409 6150 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:37:07.572430 6150 factory.go:656] Stopping watch factory\\\\nI1125 09:37:07.572446 6150 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:37:07.572488 6150 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:37:07.572490 6150 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.340895 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.351134 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.365812 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.380404 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.392134 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.405249 4854 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044db
a09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.421289 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.423624 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.423663 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.423707 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.423725 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.423737 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.431062 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.442877 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.457547 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.471944 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.489865 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.503913 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.515069 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.525439 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.525482 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.525490 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.525504 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.525514 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.526621 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.537752 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.554040 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1d
daf25c385ec6de493a037bef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ed3ec0852aa5a69e2209e16488053b6d07aedc6620ab7e2964119e94e28d27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571756 6150 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.571965 6150 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572043 6150 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:37:07.572333 6150 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:37:07.572349 6150 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 09:37:07.572383 6150 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:37:07.572391 6150 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:37:07.572409 6150 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:37:07.572430 6150 factory.go:656] Stopping watch factory\\\\nI1125 09:37:07.572446 6150 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:37:07.572472 6150 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:37:07.572488 6150 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:37:07.572490 6150 metrics.go:553] Stopping metrics server at address \\\\\\\"127.\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start 
default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"
cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.562278 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.572228 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.583721 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.595826 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.606690 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.620053 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4
c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.627593 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.627631 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.627663 4854 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.627696 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.627708 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.632939 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.644018 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.653464 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.665587 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.729819 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.729856 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.729866 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.729879 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.729888 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.772199 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.772352 4854 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.772445 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs podName:377b0f2c-4152-40db-a4c1-be3126061d7e nodeName:}" failed. No retries permitted until 2025-11-25 09:37:12.772424017 +0000 UTC m=+38.625417393 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs") pod "network-metrics-daemon-rbb99" (UID: "377b0f2c-4152-40db-a4c1-be3126061d7e") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.832440 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.832481 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.832493 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.832507 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.832518 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.873260 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873349 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:37:27.873331302 +0000 UTC m=+53.726324678 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.873430 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.873476 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.873534 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873567 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.873572 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873601 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:27.8735939 +0000 UTC m=+53.726587276 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873788 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873817 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873836 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873889 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:27.873873067 +0000 UTC m=+53.726866473 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873934 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873947 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873947 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873955 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.873987 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:27.8739749 +0000 UTC m=+53.726968306 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:11 crc kubenswrapper[4854]: E1125 09:37:11.874007 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:27.873997071 +0000 UTC m=+53.726990487 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.934106 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.934145 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.934154 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.934168 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:11 crc kubenswrapper[4854]: I1125 09:37:11.934178 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:11Z","lastTransitionTime":"2025-11-25T09:37:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.012742 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.012837 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.012860 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.013016 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.036283 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.036357 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.036382 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.036413 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.036432 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.140752 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.140870 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.140888 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.140909 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.140928 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.243463 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.243532 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.243549 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.243569 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.243584 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.255717 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/1.log" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.258986 4854 scope.go:117] "RemoveContainer" containerID="297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef" Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.259218 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.274035 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.289027 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.305847 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.319229 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.333856 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.347107 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.347188 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.347204 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.347231 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.347247 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.348227 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\"
:{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.367730 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1d
daf25c385ec6de493a037bef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.379701 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.396556 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.411716 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-ce
rts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.426620 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.441444 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.448901 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.448946 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.448955 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.448972 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.448989 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.453890 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.453910 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.453917 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.453928 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.453939 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.458895 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753f
c478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.463603 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.466306 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.466396 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.466468 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.466553 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.466609 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.469073 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.476558 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9
ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.479408 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.479630 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.479697 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.479716 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.479739 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.479755 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.488688 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.490822 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"cru
n\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.493704 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.493946 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.494027 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.494140 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.494201 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.504703 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.507364 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.507398 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.507409 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.507424 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.507435 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.520424 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:12Z is after 
2025-08-24T17:21:41Z"
Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.520579 4854 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.551144 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.551179 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.551190 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.551206 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.551219 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.654249 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.654316 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.654327 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.654340 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.654348 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.756569 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.756618 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.756628 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.756643 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.756652 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.783629 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.783881 4854 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:37:12 crc kubenswrapper[4854]: E1125 09:37:12.784015 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs podName:377b0f2c-4152-40db-a4c1-be3126061d7e nodeName:}" failed. No retries permitted until 2025-11-25 09:37:14.783975594 +0000 UTC m=+40.636969060 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs") pod "network-metrics-daemon-rbb99" (UID: "377b0f2c-4152-40db-a4c1-be3126061d7e") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.859098 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.859142 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.859154 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.859171 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.859183 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.961073 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.961123 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.961138 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.961156 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:12 crc kubenswrapper[4854]: I1125 09:37:12.961170 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:12Z","lastTransitionTime":"2025-11-25T09:37:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.012398 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.012419 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:13 crc kubenswrapper[4854]: E1125 09:37:13.012567 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:13 crc kubenswrapper[4854]: E1125 09:37:13.012746 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.063370 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.063398 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.063408 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.063423 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.063435 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.166467 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.166818 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.166833 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.166852 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.166864 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.269333 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.269370 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.269381 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.269397 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.269407 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.371760 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.371797 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.371808 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.371822 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.371831 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.474194 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.474234 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.474244 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.474259 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.474269 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.576691 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.576759 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.576773 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.576788 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.576819 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.679576 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.679618 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.679627 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.679643 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.679652 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.781965 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.782011 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.782021 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.782039 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.782051 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.884020 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.884056 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.884064 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.884076 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.884085 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.985986 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.986042 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.986054 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.986072 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:13 crc kubenswrapper[4854]: I1125 09:37:13.986083 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:13Z","lastTransitionTime":"2025-11-25T09:37:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.038039 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:14 crc kubenswrapper[4854]: E1125 09:37:14.038162 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.038471 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:14 crc kubenswrapper[4854]: E1125 09:37:14.038531 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.038587 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:14 crc kubenswrapper[4854]: E1125 09:37:14.038687 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.038735 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:14 crc kubenswrapper[4854]: E1125 09:37:14.038783 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.088118 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.088170 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.088184 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.088199 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.088209 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.190479 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.190532 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.190542 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.190560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.190571 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.292764 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.292824 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.292833 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.292849 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.292861 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.395279 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.395321 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.395334 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.395351 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.395361 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.497616 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.497659 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.497702 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.497721 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.497732 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.600189 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.600241 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.600264 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.600295 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.600312 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.702547 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.702601 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.702617 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.702636 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.702651 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.805781 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.805819 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.805829 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.805844 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.805855 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.846871 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:14 crc kubenswrapper[4854]: E1125 09:37:14.847088 4854 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:37:14 crc kubenswrapper[4854]: E1125 09:37:14.847282 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs podName:377b0f2c-4152-40db-a4c1-be3126061d7e nodeName:}" failed. No retries permitted until 2025-11-25 09:37:18.847261015 +0000 UTC m=+44.700254391 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs") pod "network-metrics-daemon-rbb99" (UID: "377b0f2c-4152-40db-a4c1-be3126061d7e") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.907871 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.907910 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.907918 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.907931 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:14 crc kubenswrapper[4854]: I1125 09:37:14.907940 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:14Z","lastTransitionTime":"2025-11-25T09:37:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.010425 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.010641 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.010760 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.010885 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.010978 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:15Z","lastTransitionTime":"2025-11-25T09:37:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.024520 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.037451 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.048445 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.058931 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.070596 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.085335 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.105581 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.113764 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.113808 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.113822 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.113840 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.113853 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:15Z","lastTransitionTime":"2025-11-25T09:37:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.115157 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.126547 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.139552 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.153327 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.162479 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.174115 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.184400 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.193977 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.205276 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:15Z is after 2025-08-24T17:21:41Z"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.215758 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.215796 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.215805 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.215817 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:15 crc kubenswrapper[4854]: I1125 09:37:15.215825 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:15Z","lastTransitionTime":"2025-11-25T09:37:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:16 crc kubenswrapper[4854]: I1125 09:37:16.012706 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:16 crc kubenswrapper[4854]: I1125 09:37:16.012731 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:16 crc kubenswrapper[4854]: I1125 09:37:16.012957 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:16 crc kubenswrapper[4854]: E1125 09:37:16.013027 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:37:16 crc kubenswrapper[4854]: I1125 09:37:16.013065 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:16 crc kubenswrapper[4854]: E1125 09:37:16.013208 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:37:16 crc kubenswrapper[4854]: E1125 09:37:16.013392 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:37:16 crc kubenswrapper[4854]: E1125 09:37:16.013525 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:18 crc kubenswrapper[4854]: I1125 09:37:18.013443 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:18 crc kubenswrapper[4854]: I1125 09:37:18.013443 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:18 crc kubenswrapper[4854]: I1125 09:37:18.013462 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:18 crc kubenswrapper[4854]: E1125 09:37:18.013783 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:37:18 crc kubenswrapper[4854]: E1125 09:37:18.013586 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:18 crc kubenswrapper[4854]: I1125 09:37:18.013466 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:18 crc kubenswrapper[4854]: E1125 09:37:18.013843 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:37:18 crc kubenswrapper[4854]: E1125 09:37:18.013935 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:37:18 crc kubenswrapper[4854]: I1125 09:37:18.886337 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:18 crc kubenswrapper[4854]: E1125 09:37:18.886559 4854 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:37:18 crc kubenswrapper[4854]: E1125 09:37:18.886724 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs podName:377b0f2c-4152-40db-a4c1-be3126061d7e nodeName:}" failed. No retries permitted until 2025-11-25 09:37:26.886658483 +0000 UTC m=+52.739651919 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs") pod "network-metrics-daemon-rbb99" (UID: "377b0f2c-4152-40db-a4c1-be3126061d7e") : object "openshift-multus"/"metrics-daemon-secret" not registered
Has your network provider started?"} Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.541611 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.542344 4854 scope.go:117] "RemoveContainer" containerID="297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef" Nov 25 09:37:19 crc kubenswrapper[4854]: E1125 09:37:19.542536 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.638739 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.638777 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.638786 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.638801 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.638810 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:19Z","lastTransitionTime":"2025-11-25T09:37:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.741525 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.741573 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.741587 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.741614 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.741638 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:19Z","lastTransitionTime":"2025-11-25T09:37:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.844266 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.844320 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.844333 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.844350 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.844361 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:19Z","lastTransitionTime":"2025-11-25T09:37:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.946600 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.946639 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.946650 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.946694 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:19 crc kubenswrapper[4854]: I1125 09:37:19.946707 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:19Z","lastTransitionTime":"2025-11-25T09:37:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.012701 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.012738 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:20 crc kubenswrapper[4854]: E1125 09:37:20.012863 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.012918 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.012921 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:20 crc kubenswrapper[4854]: E1125 09:37:20.012962 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:20 crc kubenswrapper[4854]: E1125 09:37:20.013038 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:20 crc kubenswrapper[4854]: E1125 09:37:20.013166 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.049205 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.049271 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.049290 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.049314 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.049331 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.151552 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.151598 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.151609 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.151624 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.151634 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.254207 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.254244 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.254257 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.254281 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.254296 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.356135 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.356165 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.356173 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.356185 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.356194 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.458343 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.458378 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.458388 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.458402 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.458412 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.560924 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.560967 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.560984 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.561002 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.561015 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.663691 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.663742 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.663753 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.663768 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.663781 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.766122 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.766170 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.766178 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.766193 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.766205 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.869088 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.869127 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.869136 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.869150 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.869160 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.971921 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.971975 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.971984 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.971998 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:20 crc kubenswrapper[4854]: I1125 09:37:20.972007 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:20Z","lastTransitionTime":"2025-11-25T09:37:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.074569 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.074627 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.074638 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.074653 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.074688 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.177429 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.177470 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.177482 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.177497 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.177508 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.279952 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.279993 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.280002 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.280019 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.280028 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.382621 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.382683 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.382698 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.382731 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.382744 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.485636 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.485745 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.485771 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.485800 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.485821 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.588529 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.588556 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.588564 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.588578 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.588587 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.691790 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.691864 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.691876 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.691892 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.691904 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.793843 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.793907 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.793923 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.793992 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.794013 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.896160 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.896207 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.896218 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.896235 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.896246 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.999327 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.999409 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.999427 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.999452 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:21 crc kubenswrapper[4854]: I1125 09:37:21.999475 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:21Z","lastTransitionTime":"2025-11-25T09:37:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.012954 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.013030 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.013089 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.012998 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.013184 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.013324 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.013445 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.013565 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.102636 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.102717 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.102735 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.102756 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.102774 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.204389 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.204423 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.204439 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.204454 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.204464 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.306422 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.306471 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.306494 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.306526 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.306537 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.408709 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.408747 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.408758 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.408775 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.408788 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.511423 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.511463 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.511474 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.511490 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.511502 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.580839 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.580873 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.580882 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.580894 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.580903 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.592831 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.596912 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.596981 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.597017 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.597032 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.597041 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.613162 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.616708 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.616750 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.616762 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.616778 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.616788 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.630489 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.633728 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.633768 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.633777 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.633791 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.633801 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.644626 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.647753 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.647814 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.647823 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.647837 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.647846 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.659736 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:22 crc kubenswrapper[4854]: E1125 09:37:22.659902 4854 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.661448 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.661479 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.661487 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.661504 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.661544 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.763868 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.763923 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.763934 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.763953 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.763964 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.866933 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.867012 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.867024 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.867048 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.867063 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.970295 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.970389 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.970408 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.970432 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:22 crc kubenswrapper[4854]: I1125 09:37:22.970451 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:22Z","lastTransitionTime":"2025-11-25T09:37:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.073787 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.073837 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.073847 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.073889 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.073905 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.176442 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.176492 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.176503 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.176521 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.176532 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.279598 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.279650 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.279662 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.279697 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.279714 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.382647 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.382737 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.382756 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.382781 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.382800 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.492870 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.492901 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.492910 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.492922 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.492930 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.596341 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.596407 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.596424 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.596447 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.596464 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.699271 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.699340 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.699360 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.699385 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.699403 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.802351 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.802408 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.802420 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.802438 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.802451 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.904879 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.904998 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.905018 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.905072 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:23 crc kubenswrapper[4854]: I1125 09:37:23.905087 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:23Z","lastTransitionTime":"2025-11-25T09:37:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.008266 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.008311 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.008328 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.008346 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.008359 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.012932 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.012958 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.013042 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:24 crc kubenswrapper[4854]: E1125 09:37:24.013249 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.013314 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:24 crc kubenswrapper[4854]: E1125 09:37:24.013444 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:24 crc kubenswrapper[4854]: E1125 09:37:24.013589 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:24 crc kubenswrapper[4854]: E1125 09:37:24.013747 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.110488 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.110545 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.110562 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.110586 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.110603 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.212860 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.212897 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.212905 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.212921 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.212930 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.319864 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.319933 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.319942 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.319955 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.319963 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.422439 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.422496 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.422508 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.422527 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.422537 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.524759 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.524830 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.524844 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.524860 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.524869 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.626870 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.626927 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.626945 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.626967 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.626983 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.729185 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.729236 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.729249 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.729267 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.729279 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.831302 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.831337 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.831346 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.831361 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.831370 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.933718 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.933788 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.933800 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.933816 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:24 crc kubenswrapper[4854]: I1125 09:37:24.933828 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:24Z","lastTransitionTime":"2025-11-25T09:37:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.025452 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.037256 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.037560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.037663 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.037765 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.037950 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.040787 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.052821 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.070554 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e
15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.080729 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.094265 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.106958 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.118623 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.137021 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d4
78b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.140617 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.140655 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.140692 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.140718 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.140728 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.150046 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.161754 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.176555 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.189057 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.201369 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.215704 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.229193 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.243127 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.243176 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.243191 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.243206 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.243217 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.345813 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.345863 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.345875 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.345894 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.345906 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.448953 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.448993 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.449004 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.449021 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.449034 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.550882 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.550928 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.550942 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.550959 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.550971 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.653626 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.653694 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.653707 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.653722 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.653733 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.755536 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.755596 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.755605 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.755634 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.755646 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.858239 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.858278 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.858289 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.858304 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.858313 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.960553 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.960620 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.960637 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.960666 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:25 crc kubenswrapper[4854]: I1125 09:37:25.960711 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:25Z","lastTransitionTime":"2025-11-25T09:37:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.013380 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.013418 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.013380 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:26 crc kubenswrapper[4854]: E1125 09:37:26.013540 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.013628 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:26 crc kubenswrapper[4854]: E1125 09:37:26.013772 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:26 crc kubenswrapper[4854]: E1125 09:37:26.013909 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:26 crc kubenswrapper[4854]: E1125 09:37:26.013972 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.063028 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.063067 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.063092 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.063107 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.063116 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.165631 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.165693 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.165719 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.165737 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.165747 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.268435 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.268499 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.268511 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.268530 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.268541 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.370655 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.370730 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.370745 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.370766 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.370777 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.473693 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.473731 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.473743 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.473759 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.473770 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.576596 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.576634 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.576644 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.576660 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.576696 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.678441 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.678484 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.678496 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.678511 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.678522 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.781004 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.781053 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.781070 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.781092 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.781109 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.883935 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.883992 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.884000 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.884014 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.884024 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.964494 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:26 crc kubenswrapper[4854]: E1125 09:37:26.964755 4854 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:37:26 crc kubenswrapper[4854]: E1125 09:37:26.964826 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs podName:377b0f2c-4152-40db-a4c1-be3126061d7e nodeName:}" failed. No retries permitted until 2025-11-25 09:37:42.964802809 +0000 UTC m=+68.817796215 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs") pod "network-metrics-daemon-rbb99" (UID: "377b0f2c-4152-40db-a4c1-be3126061d7e") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.986054 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.986108 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.986123 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.986140 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:26 crc kubenswrapper[4854]: I1125 09:37:26.986152 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:26Z","lastTransitionTime":"2025-11-25T09:37:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.088847 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.088894 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.088904 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.088951 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.088963 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.191857 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.191924 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.191945 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.191968 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.191985 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.294016 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.294051 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.294060 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.294073 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.294083 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.396652 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.396921 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.397021 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.397281 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.397353 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.500472 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.500759 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.500828 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.500936 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.501030 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.603723 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.603763 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.603773 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.603788 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.603797 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.705924 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.705971 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.705982 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.705996 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.706007 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.808094 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.808149 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.808158 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.808174 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.808183 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.875330 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.875434 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.875497 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.875534 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875613 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875636 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875641 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875662 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875719 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875646 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875616 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.875618 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:37:59.875585923 +0000 UTC m=+85.728579329 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.876063 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.876139 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:59.876102478 +0000 UTC m=+85.729095944 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.876178 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:59.876168829 +0000 UTC m=+85.729162335 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.876189 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.876197 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:59.87618617 +0000 UTC m=+85.729179736 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: E1125 09:37:27.876313 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:37:59.876288083 +0000 UTC m=+85.729281489 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.910887 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.910919 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.910927 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.910940 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:27 crc kubenswrapper[4854]: I1125 09:37:27.910949 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:27Z","lastTransitionTime":"2025-11-25T09:37:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.012448 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.012518 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.012583 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:28 crc kubenswrapper[4854]: E1125 09:37:28.012591 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.012615 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:28 crc kubenswrapper[4854]: E1125 09:37:28.012714 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:28 crc kubenswrapper[4854]: E1125 09:37:28.012921 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:28 crc kubenswrapper[4854]: E1125 09:37:28.012984 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.014131 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.014171 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.014184 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.014198 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.014209 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.116298 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.116339 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.116348 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.116364 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.116373 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.218864 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.218903 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.218912 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.218925 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.218934 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.321309 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.321351 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.321362 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.321377 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.321389 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.424531 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.424568 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.424587 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.424599 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.424608 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.527229 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.527261 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.527271 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.527283 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.527292 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.630434 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.630483 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.630494 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.630511 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.630524 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.732980 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.733043 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.733057 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.733075 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.733085 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.835891 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.835940 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.835952 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.835968 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.835980 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.943128 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.943177 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.943190 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.943209 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.943223 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:28Z","lastTransitionTime":"2025-11-25T09:37:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.961607 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.969841 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.979666 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92e
daf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:28 crc kubenswrapper[4854]: I1125 09:37:28.995949 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.008547 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.024008 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.040218 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.045869 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.045935 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.045959 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.045990 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.046014 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.054186 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.067064 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"t
erminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.079508 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.096419 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.109215 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.122942 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.134712 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.147882 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.148794 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.148851 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.148862 4854 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.148880 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.148893 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.168936 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1d
daf25c385ec6de493a037bef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.179915 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.194234 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.251183 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.251238 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.251255 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.251276 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.251291 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.354495 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.354546 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.354557 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.354576 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.354588 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.458325 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.458385 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.458402 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.458427 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.458447 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.561320 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.561354 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.561363 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.561377 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.561385 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.664055 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.664101 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.664114 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.664130 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.664140 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.766725 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.766760 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.766769 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.766784 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.766791 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.868563 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.868593 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.868602 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.868616 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.868626 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.970981 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.971039 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.971055 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.971077 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:29 crc kubenswrapper[4854]: I1125 09:37:29.971095 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:29Z","lastTransitionTime":"2025-11-25T09:37:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.012952 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.013011 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.012998 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.012957 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:30 crc kubenswrapper[4854]: E1125 09:37:30.013162 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:30 crc kubenswrapper[4854]: E1125 09:37:30.013247 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:30 crc kubenswrapper[4854]: E1125 09:37:30.013318 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:30 crc kubenswrapper[4854]: E1125 09:37:30.013402 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.073647 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.073729 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.073764 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.073783 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.073795 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.175703 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.175750 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.175767 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.175787 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.175799 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.278698 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.278744 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.278755 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.278771 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.278785 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.381061 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.381130 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.381141 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.381157 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.381167 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.483993 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.484046 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.484055 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.484068 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.484076 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.586787 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.586822 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.586832 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.586851 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.586860 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.689053 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.689096 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.689109 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.689126 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.689139 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.791596 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.791634 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.791645 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.791660 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.791694 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.893742 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.893786 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.893799 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.893817 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.893828 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.996114 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.996162 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.996174 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.996193 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:30 crc kubenswrapper[4854]: I1125 09:37:30.996204 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:30Z","lastTransitionTime":"2025-11-25T09:37:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.098615 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.098725 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.098763 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.098794 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.098813 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.201826 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.201876 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.201890 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.201910 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.201926 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.304618 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.304664 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.304703 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.304724 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.304739 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.407474 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.407527 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.407549 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.407580 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.407603 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.510644 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.510711 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.510722 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.510739 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.510757 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.613785 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.613828 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.613838 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.613853 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.613865 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.716898 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.716951 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.716965 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.716988 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.717000 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.819607 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.819702 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.819729 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.819752 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.819767 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.921552 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.921590 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.921599 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.921615 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:31 crc kubenswrapper[4854]: I1125 09:37:31.921626 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:31Z","lastTransitionTime":"2025-11-25T09:37:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.012746 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.012813 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.012852 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:32 crc kubenswrapper[4854]: E1125 09:37:32.012901 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.012921 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:32 crc kubenswrapper[4854]: E1125 09:37:32.013056 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:32 crc kubenswrapper[4854]: E1125 09:37:32.013091 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:32 crc kubenswrapper[4854]: E1125 09:37:32.013148 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.024341 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.024400 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.024421 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.024442 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.024458 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.127231 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.127280 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.127292 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.127311 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.127323 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.230250 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.230292 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.230301 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.230316 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.230325 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.333013 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.333070 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.333088 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.333111 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.333128 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.435308 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.435340 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.435347 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.435360 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.435382 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.537636 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.537728 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.537744 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.537765 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.537778 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.639446 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.639485 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.639495 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.639511 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.639521 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.741114 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.741149 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.741158 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.741170 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.741179 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.844162 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.844244 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.844269 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.844295 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.844312 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.946989 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.947059 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.947079 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.947103 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:32 crc kubenswrapper[4854]: I1125 09:37:32.947120 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:32Z","lastTransitionTime":"2025-11-25T09:37:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.050273 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.050327 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.050344 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.050367 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.050387 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.055971 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.056011 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.056022 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.056037 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.056047 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: E1125 09:37:33.075397 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:33Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.080024 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.080056 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.080067 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.080082 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.080094 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: E1125 09:37:33.098437 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:33Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.102881 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.102943 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.102964 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.102988 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.103004 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: E1125 09:37:33.118340 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:33Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.121947 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.121972 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.121982 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.121997 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.122007 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: E1125 09:37:33.132791 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:33Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.136278 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.136322 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.136331 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.136350 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.136362 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: E1125 09:37:33.149292 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:33Z is after 
2025-08-24T17:21:41Z" Nov 25 09:37:33 crc kubenswrapper[4854]: E1125 09:37:33.149409 4854 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.152801 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.152879 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.152902 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.152934 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.152957 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.255719 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.255770 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.255782 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.255798 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.255808 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.357908 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.357962 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.357982 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.358007 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.358028 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.460948 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.460989 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.461004 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.461024 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.461038 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.564139 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.564195 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.564212 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.564233 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.564294 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.667784 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.667827 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.667838 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.667860 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.667875 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.770592 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.770653 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.770706 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.770730 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.770747 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.874367 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.874493 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.874514 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.874539 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.874556 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.978323 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.978379 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.978396 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.978417 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:33 crc kubenswrapper[4854]: I1125 09:37:33.978434 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:33Z","lastTransitionTime":"2025-11-25T09:37:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.012371 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.012400 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.012483 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.012491 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:34 crc kubenswrapper[4854]: E1125 09:37:34.012641 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:34 crc kubenswrapper[4854]: E1125 09:37:34.013279 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:34 crc kubenswrapper[4854]: E1125 09:37:34.013426 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:34 crc kubenswrapper[4854]: E1125 09:37:34.013642 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.013748 4854 scope.go:117] "RemoveContainer" containerID="297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.080270 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.080533 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.080545 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.080561 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.080571 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.183146 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.183176 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.183185 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.183199 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.183208 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.285560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.285601 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.285609 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.285637 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.285650 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.331495 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/1.log" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.333947 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.334893 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.350024 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.366023 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.384433 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.388529 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.388559 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.388567 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.388589 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.388600 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.400307 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.416965 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.430377 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.447436 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.500414 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.500454 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.500466 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.500481 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.500492 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.502874 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.520736 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.530327 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.540843 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.553429 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.563860 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.578014 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.591420 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.602742 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.602777 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.602787 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.602803 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.602815 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.605366 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.616607 4854 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.704785 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.704832 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.704844 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.704859 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.704869 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.807543 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.807588 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.807612 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.807629 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.807640 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.909795 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.909833 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.909841 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.909855 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:34 crc kubenswrapper[4854]: I1125 09:37:34.909864 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:34Z","lastTransitionTime":"2025-11-25T09:37:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.011820 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.011881 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.011897 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.011917 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.011933 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.028936 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.042057 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.054656 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.072929 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restart
Count\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.088467 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.105510 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.114278 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.114344 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.114364 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.114393 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.114411 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.117562 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.129584 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.143182 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.154194 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.167658 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.182381 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.197831 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.213307 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.216759 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.216826 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.216853 4854 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.216879 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.216896 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.233701 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6
ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.247856 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.259804 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.318497 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.318537 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.318824 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.318845 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.318853 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.338585 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/2.log" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.339228 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/1.log" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.341917 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c" exitCode=1 Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.341955 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.341986 4854 scope.go:117] "RemoveContainer" containerID="297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.342871 4854 scope.go:117] "RemoveContainer" containerID="1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c" Nov 25 09:37:35 crc kubenswrapper[4854]: E1125 09:37:35.343180 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.359087 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.373998 4854 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044db
a09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.388657 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.398507 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.413530 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.422489 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.422642 
4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.422747 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.422941 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.423035 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.428735 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:3
7Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.442904 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.455236 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.469385 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.482232 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.494692 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.512951 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.526083 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.526137 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.526146 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.526162 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.526171 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.536845 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://297b49165ae87405fe0a9b033ca58f7944adab1ddaf25c385ec6de493a037bef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:10Z\\\",\\\"message\\\":\\\"penshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:metrics-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0006e16b7 \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:9393,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{name: dns-operator,},ClusterIP:10.217.4.174,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.174],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1125 09:37:10.701837 6338 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
init\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:34Z\\\",\\\"message\\\":\\\"ID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-controller-manager/controller-manager\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.149\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:37:34.797009 6613 services_controller.go:356] Processing sync for service openshift-multus/network-metrics-service for network=default\\\\nI1125 09:37:34.797784 6613 services_controller.go:360] Finished syncing service network-metrics-service on namespace openshift-multus for network=default : 776.472µs\\\\nI1125 09:37:34.797827 6613 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nI1125 09:37:34.797857 6613 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 30.941µs\\\\nF1125 09:37:34.797882 6613 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.548851 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.566376 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\
\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.582259 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.598695 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.629093 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.629129 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.629139 4854 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.629152 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.629162 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.732374 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.732685 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.732695 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.732710 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.732720 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.835264 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.835337 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.835361 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.835390 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.835423 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.938013 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.938859 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.938882 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.938899 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:35 crc kubenswrapper[4854]: I1125 09:37:35.938912 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:35Z","lastTransitionTime":"2025-11-25T09:37:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.013418 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.013477 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.013525 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:36 crc kubenswrapper[4854]: E1125 09:37:36.013599 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.013434 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:36 crc kubenswrapper[4854]: E1125 09:37:36.013751 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:36 crc kubenswrapper[4854]: E1125 09:37:36.013814 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:36 crc kubenswrapper[4854]: E1125 09:37:36.013862 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.041468 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.041544 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.041566 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.041605 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.041620 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.145479 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.145551 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.145576 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.145605 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.145628 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.248958 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.249010 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.249025 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.249044 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.249059 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.347652 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/2.log" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.350596 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.350634 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.350646 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.350664 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.350701 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.351604 4854 scope.go:117] "RemoveContainer" containerID="1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c" Nov 25 09:37:36 crc kubenswrapper[4854]: E1125 09:37:36.351779 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.366812 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kub
e-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.383239 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.396002 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.411385 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d4
78b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.423436 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.435890 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.447393 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.452384 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.452430 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.452444 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.452461 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.452471 4854 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.458373 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.468859 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.479717 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.494193 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.507405 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.519045 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.546963 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.561926 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.561999 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.562012 4854 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.562118 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.562144 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.585148 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6
ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:34Z\\\",\\\"message\\\":\\\"ID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-controller-manager/controller-manager\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.149\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:37:34.797009 6613 services_controller.go:356] Processing sync for service openshift-multus/network-metrics-service for network=default\\\\nI1125 09:37:34.797784 6613 services_controller.go:360] Finished syncing service network-metrics-service on namespace openshift-multus for network=default : 776.472µs\\\\nI1125 09:37:34.797827 6613 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nI1125 09:37:34.797857 6613 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 30.941µs\\\\nF1125 09:37:34.797882 6613 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.595310 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.606169 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.664186 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.664246 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.664255 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.664270 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.664282 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.766511 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.766566 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.766582 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.766602 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.766617 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.873308 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.873380 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.873404 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.873433 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.873457 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.976329 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.976364 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.976372 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.976387 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:36 crc kubenswrapper[4854]: I1125 09:37:36.976396 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:36Z","lastTransitionTime":"2025-11-25T09:37:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.079248 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.079308 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.079327 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.079351 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.079370 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.182364 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.182415 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.182425 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.182443 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.182455 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.286334 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.286385 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.286406 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.286431 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.286447 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.390000 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.390057 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.390077 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.390109 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.390132 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.492178 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.492212 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.492221 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.492235 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.492246 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.594452 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.594500 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.594512 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.594531 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.594542 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.697007 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.697086 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.697099 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.697114 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.697126 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.799994 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.800046 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.800060 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.800078 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.800090 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.903096 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.903140 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.903152 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.903170 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:37 crc kubenswrapper[4854]: I1125 09:37:37.903183 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:37Z","lastTransitionTime":"2025-11-25T09:37:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.005615 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.005683 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.005697 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.005714 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.005727 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.013354 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.013405 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.013371 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:38 crc kubenswrapper[4854]: E1125 09:37:38.013529 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:38 crc kubenswrapper[4854]: E1125 09:37:38.013700 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.013759 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:38 crc kubenswrapper[4854]: E1125 09:37:38.013898 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:38 crc kubenswrapper[4854]: E1125 09:37:38.014049 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.108691 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.108741 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.108749 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.108765 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.108776 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.211165 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.211199 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.211208 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.211221 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.211231 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.313942 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.314074 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.314102 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.314138 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.314160 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.417252 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.417321 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.417344 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.417371 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.417396 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.520360 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.520413 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.520425 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.520442 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.520454 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.622801 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.622853 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.622862 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.622879 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.622891 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.724730 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.724804 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.724818 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.724833 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.724844 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.827313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.827365 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.827377 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.827397 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.827409 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.929331 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.929370 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.929378 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.929393 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:38 crc kubenswrapper[4854]: I1125 09:37:38.929404 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:38Z","lastTransitionTime":"2025-11-25T09:37:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.031161 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.031205 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.031218 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.031235 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.031247 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:39Z","lastTransitionTime":"2025-11-25T09:37:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.134394 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.134448 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.134458 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.134476 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:39 crc kubenswrapper[4854]: I1125 09:37:39.134486 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:39Z","lastTransitionTime":"2025-11-25T09:37:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:37:40 crc kubenswrapper[4854]: I1125 09:37:40.013388 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:40 crc kubenswrapper[4854]: I1125 09:37:40.013468 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:40 crc kubenswrapper[4854]: E1125 09:37:40.013519 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:40 crc kubenswrapper[4854]: I1125 09:37:40.013532 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:40 crc kubenswrapper[4854]: E1125 09:37:40.013626 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:37:40 crc kubenswrapper[4854]: I1125 09:37:40.013475 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:40 crc kubenswrapper[4854]: E1125 09:37:40.013857 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:37:40 crc kubenswrapper[4854]: E1125 09:37:40.013947 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
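Each "No sandbox for pod can be found" / "Error syncing pod" pair above is the kubelet declining to create a pod sandbox for a pod that needs CNI networking; the four pods named here remain Pending until the network plugin comes up. A hedged client-go sketch (illustrative only; same assumed kubeconfig path as above) that enumerates pods still stuck in Pending:

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Pods without a sandbox report phase Pending; the field selector is
	// evaluated server-side, so no client-side filtering is needed.
	pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(),
		metav1.ListOptions{FieldSelector: "status.phase=Pending"})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pods.Items {
		fmt.Printf("%s/%s\n", p.Namespace, p.Name)
	}
}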
Nov 25 09:37:42 crc kubenswrapper[4854]: I1125 09:37:42.013100 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:42 crc kubenswrapper[4854]: I1125 09:37:42.013155 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:42 crc kubenswrapper[4854]: E1125 09:37:42.013195 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:42 crc kubenswrapper[4854]: I1125 09:37:42.013159 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:42 crc kubenswrapper[4854]: I1125 09:37:42.013161 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:42 crc kubenswrapper[4854]: E1125 09:37:42.013349 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:37:42 crc kubenswrapper[4854]: E1125 09:37:42.013469 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:37:42 crc kubenswrapper[4854]: E1125 09:37:42.013550 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
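The records below show a secret-backed volume mount failing and being re-queued with exponential backoff (durationBeforeRetry 32s). The kubelet doubles the retry delay after each consecutive failure of the same volume operation; the constants in this sketch are assumptions chosen to reproduce the observed 32s, not values read from this log:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed constants: a 500ms initial delay doubling per failure,
	// capped at 2m2s. Under these assumptions the seventh consecutive
	// failure yields the 32s durationBeforeRetry seen in the record below.
	delay := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second
	for failure := 1; failure <= 9; failure++ {
		fmt.Printf("failure %d: next retry in %v\n", failure, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}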
Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.032382 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.032545 4854 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.032609 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs podName:377b0f2c-4152-40db-a4c1-be3126061d7e nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.032590138 +0000 UTC m=+100.885583514 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs") pod "network-metrics-daemon-rbb99" (UID: "377b0f2c-4152-40db-a4c1-be3126061d7e") : object "openshift-multus"/"metrics-daemon-secret" not registered
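The long record that follows shows the node-status patch itself failing: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a TLS certificate that expired on 2025-08-24, well before the current time 2025-11-25. A small Go sketch (illustrative; it skips verification solely to read the dates, not to trust the endpoint) that prints the validity window of whatever certificate that endpoint serves:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Connect without verification only to inspect the certificate;
	// the address is the webhook endpoint named in the record below.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject=%s notBefore=%s notAfter=%s\n",
		cert.Subject, cert.NotBefore, cert.NotAfter)
}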
Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.248062 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.252272 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.252553 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.252647 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.252761 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.252842 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.264165 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.267442 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.267498 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.267511 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.267525 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.267534 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.280862 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.284803 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.284849 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.284861 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.284878 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.284889 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.298336 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.301817 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.301859 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.301870 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.301888 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.301900 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.315628 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:43 crc kubenswrapper[4854]: E1125 09:37:43.315863 4854 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.317431 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.317553 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.317646 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.317760 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.317910 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.420712 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.420770 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.420784 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.420800 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.420809 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.523530 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.523567 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.523578 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.523592 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.523602 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.625383 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.625416 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.625424 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.625438 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.625446 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.728015 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.728059 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.728075 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.728095 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.728110 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.830008 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.830052 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.830064 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.830081 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.830092 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.936201 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.936242 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.936251 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.936265 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:43 crc kubenswrapper[4854]: I1125 09:37:43.936274 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:43Z","lastTransitionTime":"2025-11-25T09:37:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.012383 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.012455 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.012505 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:44 crc kubenswrapper[4854]: E1125 09:37:44.012586 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.012455 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:44 crc kubenswrapper[4854]: E1125 09:37:44.012526 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:44 crc kubenswrapper[4854]: E1125 09:37:44.012658 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:44 crc kubenswrapper[4854]: E1125 09:37:44.012738 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.038547 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.038583 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.038593 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.038607 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.038617 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.140965 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.141002 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.141013 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.141030 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.141042 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.243744 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.243777 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.243788 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.243822 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.243833 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.346338 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.346392 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.346401 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.346417 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.346429 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.448527 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.448588 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.448601 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.448640 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.448654 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.551068 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.551120 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.551133 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.551150 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.551166 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.653930 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.653961 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.653971 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.653986 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.653998 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.757154 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.757199 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.757211 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.757227 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.757240 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.859498 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.859535 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.859546 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.859560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.859570 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.961235 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.961275 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.961284 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.961299 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:44 crc kubenswrapper[4854]: I1125 09:37:44.961309 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:44Z","lastTransitionTime":"2025-11-25T09:37:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.029100 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.038979 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.051265 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.061394 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.063394 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.063419 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.063430 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.063445 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.063455 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.072135 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.083500 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.096969 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.110540 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.130221 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:34Z\\\",\\\"message\\\":\\\"ID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-controller-manager/controller-manager\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.149\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:37:34.797009 6613 services_controller.go:356] Processing sync for service openshift-multus/network-metrics-service for network=default\\\\nI1125 09:37:34.797784 6613 services_controller.go:360] Finished syncing service network-metrics-service on namespace openshift-multus for network=default : 776.472µs\\\\nI1125 09:37:34.797827 6613 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nI1125 09:37:34.797857 6613 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 30.941µs\\\\nF1125 09:37:34.797882 6613 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.143632 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.153527 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.165319 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.165901 4854 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.165922 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.165930 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.165943 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.165951 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.175429 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.187125 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.196405 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.205999 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.214657 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.268546 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.268586 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.268595 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.268610 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.268620 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.370793 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.370848 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.370858 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.370897 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.370906 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.473107 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.473209 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.473224 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.473244 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.473258 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.575518 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.575551 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.575560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.575572 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.575581 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.678251 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.678289 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.678298 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.678309 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.678321 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.780472 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.780527 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.780539 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.780562 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.780576 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.882575 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.882615 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.882624 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.882638 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.882649 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.984742 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.984786 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.984796 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.984810 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:45 crc kubenswrapper[4854]: I1125 09:37:45.984819 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:45Z","lastTransitionTime":"2025-11-25T09:37:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.012376 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.012398 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.012411 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.012419 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:46 crc kubenswrapper[4854]: E1125 09:37:46.012904 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:46 crc kubenswrapper[4854]: E1125 09:37:46.013136 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:46 crc kubenswrapper[4854]: E1125 09:37:46.013221 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:46 crc kubenswrapper[4854]: E1125 09:37:46.013268 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.087032 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.087069 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.087082 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.087101 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.087115 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.189367 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.189409 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.189420 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.189439 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.189450 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.292150 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.292760 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.292784 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.292806 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.292816 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.379537 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-txnt5_a0e9f759-2eea-43cd-9e0a-6f149785c431/kube-multus/0.log" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.379600 4854 generic.go:334] "Generic (PLEG): container finished" podID="a0e9f759-2eea-43cd-9e0a-6f149785c431" containerID="703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20" exitCode=1 Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.379635 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-txnt5" event={"ID":"a0e9f759-2eea-43cd-9e0a-6f149785c431","Type":"ContainerDied","Data":"703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.380066 4854 scope.go:117] "RemoveContainer" containerID="703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.392459 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.397533 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.397577 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.397589 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.397606 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.397618 4854 setters.go:603] "Node became not ready" node="crc"
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.403293 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.416416 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.427655 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.438163 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.452495 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.464851 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.482506 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.495866 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:45Z\\\",\\\"message\\\":\\\"2025-11-25T09:37:00+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1391125d-1ba3-469c-948a-3d4482bec319\\\\n2025-11-25T09:37:00+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1391125d-1ba3-469c-948a-3d4482bec319 to /host/opt/cni/bin/\\\\n2025-11-25T09:37:00Z [verbose] multus-daemon started\\\\n2025-11-25T09:37:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:37:45Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.499469 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.499495 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.499504 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.499518 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.499528 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.508858 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z"
Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.521321 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.539623 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:34Z\\\",\\\"message\\\":\\\"ID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-controller-manager/controller-manager\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.149\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:37:34.797009 6613 services_controller.go:356] Processing sync for service openshift-multus/network-metrics-service for network=default\\\\nI1125 09:37:34.797784 6613 services_controller.go:360] Finished syncing service network-metrics-service on namespace openshift-multus for network=default : 776.472µs\\\\nI1125 09:37:34.797827 6613 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nI1125 09:37:34.797857 6613 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 30.941µs\\\\nF1125 09:37:34.797882 6613 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.551073 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.563256 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.575450 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-ce
rts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.586821 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.596119 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.601661 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.601718 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.601727 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.601741 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.601751 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.703464 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.703496 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.703507 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.703524 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.703534 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.805951 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.805989 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.805999 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.806024 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.806034 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.908130 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.908170 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.908180 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.908194 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:46 crc kubenswrapper[4854]: I1125 09:37:46.908204 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:46Z","lastTransitionTime":"2025-11-25T09:37:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.010698 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.010729 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.010737 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.010749 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.010757 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.112504 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.112539 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.112547 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.112560 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.112570 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.214449 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.214483 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.214491 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.214505 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.214514 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.316392 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.316438 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.316450 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.316470 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.316488 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.390076 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-txnt5_a0e9f759-2eea-43cd-9e0a-6f149785c431/kube-multus/0.log" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.390138 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-txnt5" event={"ID":"a0e9f759-2eea-43cd-9e0a-6f149785c431","Type":"ContainerStarted","Data":"5feac493298404321ee4fceca3870193d9a2cb42b9f7d769848893b756653fb9"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.402111 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.413746 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.418425 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.418457 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.418466 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.418480 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.418490 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.427214 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.439877 4854 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5feac493298404321ee4fceca3870193d9a2cb42b9f7d769848893b756653fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:45Z\\\",\\\"message\\\":\\\"2025-11-25T09:37:00+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1391125d-1ba3-469c-948a-3d4482bec319\\\\n2025-11-25T09:37:00+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1391125d-1ba3-469c-948a-3d4482bec319 to /host/opt/cni/bin/\\\\n2025-11-25T09:37:00Z [verbose] multus-daemon started\\\\n2025-11-25T09:37:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:37:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.454114 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.466784 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.486159 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:34Z\\\",\\\"message\\\":\\\"ID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-controller-manager/controller-manager\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.149\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:37:34.797009 6613 services_controller.go:356] Processing sync for service openshift-multus/network-metrics-service for network=default\\\\nI1125 09:37:34.797784 6613 services_controller.go:360] Finished syncing service network-metrics-service on namespace openshift-multus for network=default : 776.472µs\\\\nI1125 09:37:34.797827 6613 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nI1125 09:37:34.797857 6613 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 30.941µs\\\\nF1125 09:37:34.797882 6613 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.496085 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.507243 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.521093 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-ce
rts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.522547 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.522588 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.522598 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.522613 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.522623 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.533383 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.543915 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.556789 4854 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d1
26c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.568920 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.580811 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.590519 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.602606 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.624567 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.624605 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.624622 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.624637 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.624648 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.727271 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.727315 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.727324 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.727341 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.727353 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.829922 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.830005 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.830019 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.830037 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:47 crc kubenswrapper[4854]: I1125 09:37:47.830077 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:47Z","lastTransitionTime":"2025-11-25T09:37:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.013073 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.013080 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.013093 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.013185 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
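Each "No sandbox for pod can be found" detection above is immediately followed by an "Error syncing pod, skipping" error below, because sandbox creation is gated on the runtime reporting NetworkReady, and the runtime reports false while /etc/kubernetes/cni/net.d/ contains no CNI configuration; the cluster of four pods recurs at roughly 2 s intervals (09:37:48, :50, :52), consistent with kubelet retrying on its periodic pod resync. A sketch of the directory probe implied by the message (the path is taken from the log; the extensions are those libcni conventionally loads, an assumption rather than something read from this cluster):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	confDir := "/etc/kubernetes/cni/net.d" // path quoted in the kubelet message
    	entries, err := os.ReadDir(confDir)
    	if err != nil {
    		fmt.Printf("cannot read %s: %v\n", confDir, err)
    		return
    	}
    	var configs []string
    	for _, e := range entries {
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json": // extensions libcni conventionally loads (assumption)
    			configs = append(configs, e.Name())
    		}
    	}
    	if len(configs) == 0 {
    		// The state behind "no CNI configuration file in /etc/kubernetes/cni/net.d/".
    		fmt.Println("no CNI configuration found; network plugin would report not ready")
    		return
    	}
    	fmt.Println("CNI configs:", configs)
    }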
pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:48 crc kubenswrapper[4854]: E1125 09:37:48.013762 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.034565 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.034602 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.034613 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.034627 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.034636 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:48Z","lastTransitionTime":"2025-11-25T09:37:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.136706 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.136747 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.136758 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.136773 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:48 crc kubenswrapper[4854]: I1125 09:37:48.136783 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:48Z","lastTransitionTime":"2025-11-25T09:37:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.012499 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.012499 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.012572 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.012625 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:50 crc kubenswrapper[4854]: E1125 09:37:50.013016 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:50 crc kubenswrapper[4854]: E1125 09:37:50.013076 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:37:50 crc kubenswrapper[4854]: E1125 09:37:50.013182 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.013821 4854 scope.go:117] "RemoveContainer" containerID="1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c" Nov 25 09:37:50 crc kubenswrapper[4854]: E1125 09:37:50.014029 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.087116 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.087157 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.087168 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.087183 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.087194 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:50Z","lastTransitionTime":"2025-11-25T09:37:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.189419 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.189453 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.189471 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.189490 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:50 crc kubenswrapper[4854]: I1125 09:37:50.189500 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:50Z","lastTransitionTime":"2025-11-25T09:37:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[The five-entry node-status block repeats at roughly 100 ms intervals from 09:37:50.087 through 09:37:51.935, unchanged except for timestamps.]
Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.013334 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.013411 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:52 crc kubenswrapper[4854]: E1125 09:37:52.013581 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.013657 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:52 crc kubenswrapper[4854]: E1125 09:37:52.013814 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.013885 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:52 crc kubenswrapper[4854]: E1125 09:37:52.014064 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.038434 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.038477 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.038489 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.038506 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:52 crc kubenswrapper[4854]: I1125 09:37:52.038516 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:52Z","lastTransitionTime":"2025-11-25T09:37:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.286923 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.287020 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.287045 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.287078 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.287101 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.341975 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.342012 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.342023 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.342038 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.342051 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: E1125 09:37:53.353828 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.357364 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.357389 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.357398 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.357410 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.357420 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: E1125 09:37:53.373050 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.377417 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.377591 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.377725 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.377870 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.377968 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: E1125 09:37:53.397116 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.401086 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.401147 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.401165 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.401188 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.401208 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: E1125 09:37:53.414659 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.418858 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.418894 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.418906 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.418924 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.418936 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: E1125 09:37:53.430798 4854 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a363dd8e-616a-41fb-b3a6-8f9b7ff40e37\\\",\\\"systemUUID\\\":\\\"9ec5d79d-dba9-49c0-8c51-26f030e53128\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:53 crc kubenswrapper[4854]: E1125 09:37:53.430957 4854 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.432639 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.432692 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.432705 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.432722 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.432734 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.535587 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.535628 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.535639 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.535656 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.535683 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.638142 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.638195 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.638208 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.638226 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.638239 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.740798 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.740848 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.740859 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.740876 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.740887 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.843144 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.843188 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.843202 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.843221 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.843236 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.946887 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.946984 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.947004 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.947036 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:53 crc kubenswrapper[4854]: I1125 09:37:53.947055 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:53Z","lastTransitionTime":"2025-11-25T09:37:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.012913 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.012981 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.013104 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:54 crc kubenswrapper[4854]: E1125 09:37:54.013505 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:54 crc kubenswrapper[4854]: E1125 09:37:54.013307 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.013173 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:54 crc kubenswrapper[4854]: E1125 09:37:54.013640 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:54 crc kubenswrapper[4854]: E1125 09:37:54.013820 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.050043 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.050099 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.050113 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.050129 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.050141 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.152838 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.152891 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.152906 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.152928 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.152943 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.254930 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.254964 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.254975 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.254990 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.255000 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.357541 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.357600 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.357616 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.357637 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.357654 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.460138 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.460187 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.460202 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.460221 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.460232 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.573658 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.573720 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.573729 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.573746 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.573758 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.675311 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.675340 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.675349 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.675361 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.675370 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.777171 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.777208 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.777218 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.777232 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.777242 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.879346 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.879376 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.879386 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.879399 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.879408 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.981080 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.981451 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.981600 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.981796 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:54 crc kubenswrapper[4854]: I1125 09:37:54.981963 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:54Z","lastTransitionTime":"2025-11-25T09:37:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.031662 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06a8e6c353d6a9d159c07ccbd19c8d659f360fba227bd027a3d3e32aadff5724\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.048788 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13fffc919890dba4c50685bc1aa11c87a1c3e8ca4c22d40bf3993ecb29cb2b7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e04e784232d124ccfcfcbb42a1b3f2fe4dd737520c60374a9fe1d1dcd8a9a56c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.068427 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:34Z\\\",\\\"message\\\":\\\"ID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-controller-manager/controller-manager\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.149\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:37:34.797009 6613 services_controller.go:356] Processing sync for service openshift-multus/network-metrics-service for network=default\\\\nI1125 09:37:34.797784 6613 services_controller.go:360] Finished syncing service network-metrics-service on namespace openshift-multus for network=default : 776.472µs\\\\nI1125 09:37:34.797827 6613 services_controller.go:356] Processing sync for service openshift-image-registry/image-registry-operator for network=default\\\\nI1125 09:37:34.797857 6613 services_controller.go:360] Finished syncing service image-registry-operator on namespace openshift-image-registry for network=default : 30.941µs\\\\nF1125 09:37:34.797882 6613 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gt7mq_openshift-ovn-kubernetes(b2e0e8c4-35b0-4ca8-acec-d6c94cf76354)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r24xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gt7mq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.078045 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lf42b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e0358b43-024d-430b-b886-c3ba51fb479e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://77304ae3027f1ed588bd648fe7573d6a3bc7502ab02db6687fd7f9a885429104\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9zdw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lf42b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.084574 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.084659 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.084697 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.084723 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.084734 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.137695 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7cb2c023-6f3b-4c24-a49d-1f4686b5eca5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://defcab141d127cd4f8abcc2c984b23984b793a596c443ce6ab8317400f5a4ed5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00a26dc878dfb14a49676fc2c48da211fe1d5a28609cd5f564512cdb6a60fe6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9v8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7zd22\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.152576 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe8c3a2-bb2a-4a49-b104-fb0f10a74b74\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c67a2da54854403d420ac7c05ea4211260914f5123d6ac5f086e22b88256a331\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://64018e6a7f7cdfcdd8339c676ebae79d9047a099a69048351406278d2a142863\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://53b3a5ae9621d23b07fe07044dba09946f559dc5b8e75d8f1b6a2ccd0c672247\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.169902 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.184936 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b789d7d13805b447638f66bcfa89b997c9ad47c92e85a776e97e70ea1a7e950\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbvn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9qdk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.187130 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.187271 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.187354 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.187425 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.187486 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.197493 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36afbdef-e971-4c88-b8fd-0f289b9dd07c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:36:49Z\\\",\\\"message\\\":\\\"W1125 09:36:38.376967 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:36:38.377570 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063398 cert, and key in /tmp/serving-cert-3164028442/serving-signer.crt, /tmp/serving-cert-3164028442/serving-signer.key\\\\nI1125 09:36:38.736353 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:36:38.739775 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:36:38.739926 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:36:38.743151 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3164028442/tls.crt::/tmp/serving-cert-3164028442/tls.key\\\\\\\"\\\\nF1125 09:36:49.315575 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:38Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.209330 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ba9be543-b9b2-43fe-b387-e4e8d93a5f33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://970f252dcc9145faeafc8d8bd7ac63b329710552096733720038b3cc76a6739e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ccf28beb4109e4bf20284e9549e2063585667d175f20d55ccce18b7b7441e4e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://367de262895fba6dd5ef497f51059236c7b388d54847ea16f79bc1157017b0ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a541e16003f5246d94c8e614253336b755097bbb7989b98efeffba79091aa5ab\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.220135 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd00115c937d7bd2a5f0c16c846127b630c250ba8bd33ec244ee0df3c2649e2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 
09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.233302 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l4c8x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"baab3391-6269-467e-be1c-c992c82ddd7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caac5bab62b2f161eafa08b0d713c2619e27e61b24b106286f9e08c46af2c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xkwcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l4c8x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.244614 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rbb99" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"377b0f2c-4152-40db-a4c1-be3126061d7e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr9p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:37:11Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rbb99\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.255419 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.270455 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.287836 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34f44032-4367-4650-b4e0-02aa8d3209ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a71ae62f5763cc4804ec5c00a4c8619d470790e92dd99e1afe671ae166458c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ebc3cd095f1169b6b67037cfbb830acf7b1275acd3f6af69ee5d7db8d784ab8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1e5f93d50c32f46bf64187f8eb153bf6aa142daeabaf22ddb8fde3e81f67d0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:36:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f9b0b8c346ae1cd9ad0c8a82cc54b4920b77c7e039cbd2fcfdd84943ec53998\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aea8d8f7acec538bc1237bbc0a8e37d86957698b935ecdadcfdf05c4ddd54b3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71d1b2734ddab9b73793ffd6d53d72cb863f94b4538ed3e4b3d3192f7b40aa60\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6b6baad6fa6ba1c2a10e119763cba1630f36d82bddf5ac2bffdb8e7be67ded2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:37:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:37:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whb45\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vkjjq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.289750 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.289808 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:55 crc 
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.289851 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.289869 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.302397 4854 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-txnt5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e9f759-2eea-43cd-9e0a-6f149785c431\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:36:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:37:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5feac493298404321ee4fceca3870193d9a2cb42b9f7d769848893b756653fb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:37:45Z\\\",\\\"message\\\":\\\"2025-11-25T09:37:00+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1391125d-1ba3-469c-948a-3d4482bec319\\\\n2025-11-25T09:37:00+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1391125d-1ba3-469c-948a-3d4482bec319 to /host/opt/cni/bin/\\\\n2025-11-25T09:37:00Z [verbose] multus-daemon started\\\\n2025-11-25T09:37:00Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:37:45Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:36:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:37:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vhlt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:36:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-txnt5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:37:55Z is after 2025-08-24T17:21:41Z"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.391661 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.391724 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.391733 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.391750 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.391759 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.494614 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.494721 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.494746 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.494775 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.494796 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.597826 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.597877 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.597890 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.597911 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.597926 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.701010 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.701075 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.701108 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.701139 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.701161 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.803701 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.803778 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.803800 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.803830 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.803853 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.906577 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.906630 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.906641 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.906663 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:55 crc kubenswrapper[4854]: I1125 09:37:55.906776 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:55Z","lastTransitionTime":"2025-11-25T09:37:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.010148 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.010209 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.010244 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.010268 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.010287 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.012370 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.012433 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.012437 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.012375 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:56 crc kubenswrapper[4854]: E1125 09:37:56.012551 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:37:56 crc kubenswrapper[4854]: E1125 09:37:56.012664 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:37:56 crc kubenswrapper[4854]: E1125 09:37:56.012833 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:37:56 crc kubenswrapper[4854]: E1125 09:37:56.012912 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.113078 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.113138 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.113163 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.113194 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.113216 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.216500 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.216542 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.216554 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.216570 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.216581 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.320196 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.320281 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.320295 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.320313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.320325 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.423291 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.423348 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.423362 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.423385 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.423399 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.526001 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.526050 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.526064 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.526080 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.526092 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.628969 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.629070 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.629085 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.629099 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.629110 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.731440 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.731475 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.731483 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.731497 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.731506 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.834907 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.834961 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.834972 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.834995 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.835013 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.943855 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.943902 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.943915 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.943944 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:56 crc kubenswrapper[4854]: I1125 09:37:56.943958 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:56Z","lastTransitionTime":"2025-11-25T09:37:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.046796 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.046849 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.046859 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.046872 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.046882 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.149253 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.149501 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.149590 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.149680 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.149747 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.253056 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.253104 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.253113 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.253127 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.253137 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.355487 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.355521 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.355529 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.355543 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.355554 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.458478 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.458513 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.458521 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.458536 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.458550 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.560422 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.560494 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.560520 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.560549 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.560571 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.663245 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.663282 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.663293 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.663585 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.663602 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.766605 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.766646 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.766657 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.766688 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.766701 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.870122 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.870212 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.870232 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.870257 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.870277 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.973460 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.973826 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.973970 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.974159 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:37:57 crc kubenswrapper[4854]: I1125 09:37:57.974300 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:57Z","lastTransitionTime":"2025-11-25T09:37:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.013522 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.013607 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.013621 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:37:58 crc kubenswrapper[4854]: E1125 09:37:58.014165 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:37:58 crc kubenswrapper[4854]: E1125 09:37:58.014288 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:37:58 crc kubenswrapper[4854]: E1125 09:37:58.013981 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.014302 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:58 crc kubenswrapper[4854]: E1125 09:37:58.014402 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.077996 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.078307 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.078401 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.078496 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.078597 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.182367 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.182421 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.182439 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.182461 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.182480 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.285299 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.285332 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.285341 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.285356 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.285366 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.388473 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.388522 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.388534 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.388554 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.388567 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.493048 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.493386 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.493494 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.493590 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.493718 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.595818 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.595868 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.595877 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.595891 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.595900 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.699100 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.699191 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.699218 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.699250 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.699274 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.801912 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.801963 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.801976 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.801992 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.802003 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.905533 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.905564 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.905573 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.905589 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:58 crc kubenswrapper[4854]: I1125 09:37:58.905601 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:58Z","lastTransitionTime":"2025-11-25T09:37:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.007897 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.007949 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.007965 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.007987 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.008002 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.111038 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.111071 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.111080 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.111094 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.111104 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.213770 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.213801 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.213808 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.213821 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.213830 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.316483 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.316518 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.316527 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.316541 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.316552 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.419783 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.419834 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.419846 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.419863 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.419882 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.522559 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.522625 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.522634 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.522684 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.522695 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.624372 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.624417 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.624429 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.624446 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.624459 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.726712 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.726749 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.726760 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.726774 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.726784 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.829267 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.829508 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.829637 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.829763 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.829856 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.929035 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929184 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:39:03.929152299 +0000 UTC m=+149.782145675 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.929443 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.929508 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.929565 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.929600 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929629 4854 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929732 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:39:03.929714265 +0000 UTC m=+149.782707641 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929786 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929806 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929819 4854 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929861 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:39:03.929851108 +0000 UTC m=+149.782844594 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929922 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929934 4854 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929944 4854 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929969 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:39:03.929961792 +0000 UTC m=+149.782955168 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.929984 4854 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: E1125 09:37:59.930105 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:39:03.930077665 +0000 UTC m=+149.783071081 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.931797 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.931830 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.931841 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.931857 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:37:59 crc kubenswrapper[4854]: I1125 09:37:59.931868 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:37:59Z","lastTransitionTime":"2025-11-25T09:37:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.012757 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.012792 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.012816 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:38:00 crc kubenswrapper[4854]: E1125 09:38:00.013481 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:38:00 crc kubenswrapper[4854]: E1125 09:38:00.013178 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.012857 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:38:00 crc kubenswrapper[4854]: E1125 09:38:00.013830 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:38:00 crc kubenswrapper[4854]: E1125 09:38:00.013934 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.034547 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.034588 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.034598 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.034614 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.034625 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.136561 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.136617 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.136635 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.136658 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.136705 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.239049 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.239110 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.239127 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.239149 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.239168 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.341205 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.341273 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.341298 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.341332 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.341360 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.443313 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.443378 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.443396 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.443420 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.443436 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.546663 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.546794 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.546825 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.546855 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.546893 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.650097 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.650170 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.650194 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.650222 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.650243 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.753594 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.753659 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.753721 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.753794 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.753823 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.856136 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.856187 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.856201 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.856222 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.856244 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.959387 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.959427 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.959439 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.959454 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:00 crc kubenswrapper[4854]: I1125 09:38:00.959464 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:00Z","lastTransitionTime":"2025-11-25T09:38:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.034564 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.061929 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.061986 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.062006 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.062030 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.062051 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:01Z","lastTransitionTime":"2025-11-25T09:38:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.164324 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.164367 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.164379 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.164395 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.164407 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:01Z","lastTransitionTime":"2025-11-25T09:38:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.267762 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.267802 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.267812 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.267826 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.267835 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:01Z","lastTransitionTime":"2025-11-25T09:38:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.370721 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.370767 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.370779 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.370797 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.370809 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:01Z","lastTransitionTime":"2025-11-25T09:38:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.472947 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.472977 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.472985 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.473001 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.473010 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:01Z","lastTransitionTime":"2025-11-25T09:38:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.574917 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.574976 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.574987 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.575003 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:38:01 crc kubenswrapper[4854]: I1125 09:38:01.575014 4854 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:38:01Z","lastTransitionTime":"2025-11-25T09:38:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five-entry "Recording event message" / "Node became not ready" block repeats roughly every 100 ms from 09:38:01.678 through 09:38:03.435 while the CNI configuration is still missing, interleaved with the pod-sync entries below; identical repetitions elided ...]
Nov 25 09:38:02 crc kubenswrapper[4854]: I1125 09:38:02.013220 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:38:02 crc kubenswrapper[4854]: E1125 09:38:02.013840 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:38:02 crc kubenswrapper[4854]: I1125 09:38:02.013864 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:38:02 crc kubenswrapper[4854]: I1125 09:38:02.013941 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:38:02 crc kubenswrapper[4854]: E1125 09:38:02.014042 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:38:02 crc kubenswrapper[4854]: I1125 09:38:02.014090 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:38:02 crc kubenswrapper[4854]: E1125 09:38:02.014116 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:38:02 crc kubenswrapper[4854]: E1125 09:38:02.014220 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rbb99" podUID="377b0f2c-4152-40db-a4c1-be3126061d7e"
Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.492574 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg"]
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.495407 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.498466 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.498510 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.498551 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.513496 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podStartSLOduration=68.513477375 podStartE2EDuration="1m8.513477375s" podCreationTimestamp="2025-11-25 09:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.513450855 +0000 UTC m=+89.366444231" watchObservedRunningTime="2025-11-25 09:38:03.513477375 +0000 UTC m=+89.366470761" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.539874 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=67.539852992 podStartE2EDuration="1m7.539852992s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.527202604 +0000 UTC m=+89.380195990" watchObservedRunningTime="2025-11-25 09:38:03.539852992 +0000 UTC m=+89.392846368" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.550563 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-l4c8x" podStartSLOduration=68.550543764 podStartE2EDuration="1m8.550543764s" podCreationTimestamp="2025-11-25 09:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.550512333 +0000 UTC m=+89.403505729" watchObservedRunningTime="2025-11-25 09:38:03.550543764 +0000 UTC m=+89.403537150" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.572591 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.572709 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 
09:38:03.572773 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.572806 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.572897 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.576938 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=67.57692223 podStartE2EDuration="1m7.57692223s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.576803706 +0000 UTC m=+89.429797102" watchObservedRunningTime="2025-11-25 09:38:03.57692223 +0000 UTC m=+89.429915606" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.599297 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=35.599276012 podStartE2EDuration="35.599276012s" podCreationTimestamp="2025-11-25 09:37:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.589289049 +0000 UTC m=+89.442282435" watchObservedRunningTime="2025-11-25 09:38:03.599276012 +0000 UTC m=+89.452269388" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.613010 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-txnt5" podStartSLOduration=68.6129887 podStartE2EDuration="1m8.6129887s" podCreationTimestamp="2025-11-25 09:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.612795614 +0000 UTC m=+89.465788980" watchObservedRunningTime="2025-11-25 09:38:03.6129887 +0000 UTC m=+89.465982066" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.653823 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-vkjjq" podStartSLOduration=68.653803803 podStartE2EDuration="1m8.653803803s" podCreationTimestamp="2025-11-25 09:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.653647239 +0000 UTC m=+89.506640635" watchObservedRunningTime="2025-11-25 
09:38:03.653803803 +0000 UTC m=+89.506797179" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.673478 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.673535 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.673553 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.673578 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.673595 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.673623 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.673663 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.674459 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.679609 4854 
Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.693409 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-fvzbg\" (UID: \"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg"
Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.699343 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-lf42b" podStartSLOduration=67.699333001 podStartE2EDuration="1m7.699333001s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.685349986 +0000 UTC m=+89.538343372" watchObservedRunningTime="2025-11-25 09:38:03.699333001 +0000 UTC m=+89.552326377"
Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.699888 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7zd22" podStartSLOduration=66.699883006 podStartE2EDuration="1m6.699883006s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.699513276 +0000 UTC m=+89.552506652" watchObservedRunningTime="2025-11-25 09:38:03.699883006 +0000 UTC m=+89.552876382"
Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.768730 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=2.768710253 podStartE2EDuration="2.768710253s" podCreationTimestamp="2025-11-25 09:38:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:03.767732965 +0000 UTC m=+89.620726361" watchObservedRunningTime="2025-11-25 09:38:03.768710253 +0000 UTC m=+89.621703649"
Nov 25 09:38:03 crc kubenswrapper[4854]: I1125 09:38:03.808877 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg"
[... "No sandbox for pod can be found" / "Error syncing pod, skipping" (network is not ready) pairs for network-check-source-55646444c4-trplf, networking-console-plugin-85b44fc459-gdk6g, network-check-target-xd92c, and network-metrics-daemon-rbb99 repeat at 09:38:04.013; identical entries elided ...]
Nov 25 09:38:04 crc kubenswrapper[4854]: I1125 09:38:04.443732 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" event={"ID":"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e","Type":"ContainerStarted","Data":"a16e5734c1ee1fb505c81506ce1d1c2e6fd8487cbb3f447d7bc4f165c0122205"}
Nov 25 09:38:04 crc kubenswrapper[4854]: I1125 09:38:04.443817 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" event={"ID":"5d0ad4e7-66bc-43f6-85ca-96b054cd8f2e","Type":"ContainerStarted","Data":"aca95bb7e439d3c594c3063b475a40bd5e3aab0cb395ec2804728ad3193c921c"}
Nov 25 09:38:04 crc kubenswrapper[4854]: I1125 09:38:04.457889 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fvzbg" podStartSLOduration=68.457868529 podStartE2EDuration="1m8.457868529s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:04.457448556 +0000 UTC m=+90.310441942" watchObservedRunningTime="2025-11-25 09:38:04.457868529 +0000 UTC m=+90.310861925"
Nov 25 09:38:05 crc kubenswrapper[4854]: I1125 09:38:05.014256 4854 scope.go:117] "RemoveContainer" containerID="1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c"
Nov 25 09:38:05 crc kubenswrapper[4854]: I1125 09:38:05.448363 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/2.log"
Nov 25 09:38:05 crc kubenswrapper[4854]: I1125 09:38:05.451115 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerStarted","Data":"6d9f4f27a0dee1b92577e22bdfe67d21b43a1be64519f231492aeb22d0ba2e0b"}
Nov 25 09:38:05 crc kubenswrapper[4854]: I1125 09:38:05.451490 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq"
Nov 25 09:38:05 crc kubenswrapper[4854]: I1125 09:38:05.485803 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podStartSLOduration=70.485787122 podStartE2EDuration="1m10.485787122s" podCreationTimestamp="2025-11-25 09:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:05.484384523 +0000 UTC m=+91.337377899" watchObservedRunningTime="2025-11-25 09:38:05.485787122 +0000 UTC m=+91.338780498"
[... the same four "No sandbox" / "Error syncing pod" pairs repeat at 09:38:06.013; identical entries elided ...]
Nov 25 09:38:06 crc kubenswrapper[4854]: I1125 09:38:06.051237 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-rbb99"]
[... a further "No sandbox" / "Error syncing pod" pair for network-metrics-daemon-rbb99 at 09:38:06.454 and the same four pairs again at 09:38:08.013; identical entries elided ...]
Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.345174 4854 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.345357 4854 kubelet_node_status.go:538] "Fast updating node status as it just became ready"
Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.380771 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-sb8md"]
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.382575 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.383698 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.383857 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.384015 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.384218 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.384409 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.384957 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.385110 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.385249 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.385427 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.387349 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-srrn8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.388431 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.388886 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.389195 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.389367 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.389856 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.390138 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.390700 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.392756 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.393882 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-62dll"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.394316 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-jvdld"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.394400 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.394655 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.395162 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.395375 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.395652 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.396574 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.396881 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.397042 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-jvdld" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.397374 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.402159 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.402610 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403011 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403035 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403086 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403096 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403217 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403319 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403374 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403413 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403451 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.403560 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.405925 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.406125 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.406220 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.409775 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.409838 4854 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.410114 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.410721 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.411286 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.413644 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.411302 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.429022 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.430024 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.432892 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434422 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434459 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434478 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434495 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1a9dfec2-b952-4e84-9d99-377792feb851-audit-dir\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434490 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:38:08 crc 
kubenswrapper[4854]: I1125 09:38:08.434511 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434692 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4190d598-71ee-4d5d-885c-914ba454df27-machine-approver-tls\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434734 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jxvg\" (UniqueName: \"kubernetes.io/projected/4190d598-71ee-4d5d-885c-914ba454df27-kube-api-access-8jxvg\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434762 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjds4\" (UniqueName: \"kubernetes.io/projected/9dab85a1-11f8-45ee-ab81-394ead31aab5-kube-api-access-xjds4\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434792 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-client-ca\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.434912 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44450497-9e24-4702-8614-894197881904-config\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435010 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-audit\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435042 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjrsc\" (UniqueName: \"kubernetes.io/projected/1a9dfec2-b952-4e84-9d99-377792feb851-kube-api-access-hjrsc\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435066 
4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/2396b594-22b4-4052-b767-54e2aaf1b0dc-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fjr4q\" (UID: \"2396b594-22b4-4052-b767-54e2aaf1b0dc\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435098 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435122 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435154 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435015 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435195 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqjw6\" (UniqueName: \"kubernetes.io/projected/c8bf226e-44f3-494c-b837-b9e8b9f9904d-kube-api-access-rqjw6\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435222 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435239 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4190d598-71ee-4d5d-885c-914ba454df27-auth-proxy-config\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435268 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-config\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435295 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-etcd-serving-ca\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435119 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435335 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hzzd\" (UniqueName: \"kubernetes.io/projected/a2d7193d-eb53-446a-a96c-49d28dbbe724-kube-api-access-7hzzd\") pod \"downloads-7954f5f757-jvdld\" (UID: \"a2d7193d-eb53-446a-a96c-49d28dbbe724\") " pod="openshift-console/downloads-7954f5f757-jvdld" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435363 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-serving-cert\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435386 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435407 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435427 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435144 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435447 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-trusted-ca-bundle\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435468 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-image-import-ca\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435492 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435185 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435561 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435588 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8bf226e-44f3-494c-b837-b9e8b9f9904d-serving-cert\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435611 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44450497-9e24-4702-8614-894197881904-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435660 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mvn6l"] Nov 25 09:38:08 crc 
kubenswrapper[4854]: I1125 09:38:08.435691 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-serving-cert\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435741 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435769 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435834 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbtlt\" (UniqueName: \"kubernetes.io/projected/2396b594-22b4-4052-b767-54e2aaf1b0dc-kube-api-access-vbtlt\") pod \"cluster-samples-operator-665b6dd947-fjr4q\" (UID: \"2396b594-22b4-4052-b767-54e2aaf1b0dc\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435863 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-dir\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435902 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435949 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcdnl\" (UniqueName: \"kubernetes.io/projected/44450497-9e24-4702-8614-894197881904-kube-api-access-mcdnl\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.435999 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1a9dfec2-b952-4e84-9d99-377792feb851-node-pullsecrets\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436022 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxgbn\" (UniqueName: \"kubernetes.io/projected/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-kube-api-access-mxgbn\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 
25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436044 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-policies\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436066 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4190d598-71ee-4d5d-885c-914ba454df27-config\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436086 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-etcd-client\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436112 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-encryption-config\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436137 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-config\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436151 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436168 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.436393 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.437125 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.442756 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-krv84"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.443239 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.443467 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.443789 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.444086 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.444200 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.444256 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.444525 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.445035 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.445197 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.445281 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.445421 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.445737 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.445920 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.449979 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.452318 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mz9k6"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.452491 4854 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.452720 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.453889 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.454766 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.456018 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.456201 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.456338 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.457061 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.458963 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.458989 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.459633 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.460156 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.460664 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.461009 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.461198 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.461392 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vhjzg"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.461921 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.463861 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.464231 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sd29s"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.464493 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.464543 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.464769 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.464879 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.491277 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.491327 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.491400 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.491421 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.491806 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.492691 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.492933 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-542js"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.492937 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493468 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493034 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493571 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493100 4854 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493705 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493710 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493792 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.493934 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.494016 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.494083 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.494359 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.494435 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.494532 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.496723 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-p2x8c"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.497276 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-52bvk"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.497879 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498006 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498119 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498021 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498458 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498512 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498588 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498646 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498661 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498725 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498807 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498825 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498873 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498828 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.498973 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.499058 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.500434 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.500781 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.501344 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.501836 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.502286 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.501866 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.502637 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-vc2n9"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.502810 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.503068 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.503291 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.503622 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.503857 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.504111 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.504758 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.507619 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.509802 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.511254 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.512140 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.512299 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.512917 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.513883 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.514417 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.521082 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.521739 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ln89b"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.522195 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.522497 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.522750 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-t99fd"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.523183 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.524919 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-h5cbk"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.525659 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.526161 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-m5689"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.526707 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.527266 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-sb8md"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.528643 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-srrn8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.530448 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.532956 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-vfjfs"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.533626 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.535378 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.536961 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-image-import-ca\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.538584 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.538733 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.538826 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8bf226e-44f3-494c-b837-b9e8b9f9904d-serving-cert\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.538910 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44450497-9e24-4702-8614-894197881904-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539008 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-serving-cert\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539104 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539224 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbtlt\" (UniqueName: \"kubernetes.io/projected/2396b594-22b4-4052-b767-54e2aaf1b0dc-kube-api-access-vbtlt\") pod 
\"cluster-samples-operator-665b6dd947-fjr4q\" (UID: \"2396b594-22b4-4052-b767-54e2aaf1b0dc\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539311 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-dir\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539408 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcdnl\" (UniqueName: \"kubernetes.io/projected/44450497-9e24-4702-8614-894197881904-kube-api-access-mcdnl\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539503 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1a9dfec2-b952-4e84-9d99-377792feb851-node-pullsecrets\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539610 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-etcd-client\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539731 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxgbn\" (UniqueName: \"kubernetes.io/projected/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-kube-api-access-mxgbn\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539821 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-policies\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.539909 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4190d598-71ee-4d5d-885c-914ba454df27-config\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540101 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-encryption-config\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540237 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-config\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540308 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-image-import-ca\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540395 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540412 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540575 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540890 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540975 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541058 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1a9dfec2-b952-4e84-9d99-377792feb851-audit-dir\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541142 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541235 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4190d598-71ee-4d5d-885c-914ba454df27-machine-approver-tls\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541320 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jxvg\" (UniqueName: \"kubernetes.io/projected/4190d598-71ee-4d5d-885c-914ba454df27-kube-api-access-8jxvg\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541403 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjds4\" (UniqueName: \"kubernetes.io/projected/9dab85a1-11f8-45ee-ab81-394ead31aab5-kube-api-access-xjds4\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541503 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-client-ca\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541620 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/2396b594-22b4-4052-b767-54e2aaf1b0dc-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fjr4q\" (UID: \"2396b594-22b4-4052-b767-54e2aaf1b0dc\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540000 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-dir\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541741 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44450497-9e24-4702-8614-894197881904-config\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541859 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-audit\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " 
pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541899 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjrsc\" (UniqueName: \"kubernetes.io/projected/1a9dfec2-b952-4e84-9d99-377792feb851-kube-api-access-hjrsc\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541958 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541993 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542026 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqjw6\" (UniqueName: \"kubernetes.io/projected/c8bf226e-44f3-494c-b837-b9e8b9f9904d-kube-api-access-rqjw6\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542054 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4190d598-71ee-4d5d-885c-914ba454df27-auth-proxy-config\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542079 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-config\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542103 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-etcd-serving-ca\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542144 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hzzd\" (UniqueName: \"kubernetes.io/projected/a2d7193d-eb53-446a-a96c-49d28dbbe724-kube-api-access-7hzzd\") pod \"downloads-7954f5f757-jvdld\" (UID: \"a2d7193d-eb53-446a-a96c-49d28dbbe724\") " pod="openshift-console/downloads-7954f5f757-jvdld" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542179 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-serving-cert\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542205 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542230 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542255 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542277 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-trusted-ca-bundle\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542274 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.542768 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.540573 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4190d598-71ee-4d5d-885c-914ba454df27-config\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.543099 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-audit\") pod \"apiserver-76f77b778f-sb8md\" (UID: 
\"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.543285 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1a9dfec2-b952-4e84-9d99-377792feb851-node-pullsecrets\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.543961 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1a9dfec2-b952-4e84-9d99-377792feb851-audit-dir\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.544151 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-config\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.544343 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44450497-9e24-4702-8614-894197881904-config\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.541544 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-policies\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.546631 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.548136 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-config\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.548159 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4190d598-71ee-4d5d-885c-914ba454df27-auth-proxy-config\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.549102 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 
09:38:08.549459 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.549890 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-etcd-client\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.550234 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.550515 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.550546 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44450497-9e24-4702-8614-894197881904-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.550917 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.553188 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-client-ca\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.554215 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/2396b594-22b4-4052-b767-54e2aaf1b0dc-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fjr4q\" (UID: \"2396b594-22b4-4052-b767-54e2aaf1b0dc\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.554268 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.554421 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-trusted-ca-bundle\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.554620 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-serving-cert\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.554617 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.555055 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-serving-cert\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.559179 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8bf226e-44f3-494c-b837-b9e8b9f9904d-serving-cert\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.559289 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1a9dfec2-b952-4e84-9d99-377792feb851-etcd-serving-ca\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.559372 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.561564 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4190d598-71ee-4d5d-885c-914ba454df27-machine-approver-tls\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.561959 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1a9dfec2-b952-4e84-9d99-377792feb851-encryption-config\") pod 
\"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.563250 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.564063 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.564319 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.564521 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.567231 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.567644 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.569459 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.570959 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-jvdld"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.571982 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-62dll"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.573185 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.574222 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.575360 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.576302 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.577503 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-dns-operator/dns-operator-744455d44c-52bvk"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.578852 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vhjzg"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.583027 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.583178 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.585531 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.588755 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.591305 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.593541 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.595275 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mz9k6"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.596795 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.598836 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sd29s"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.600741 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mvn6l"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.604593 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.607376 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-krv84"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.607461 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.608275 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-t99fd"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.609222 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.610158 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.612710 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.613505 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.614948 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ln89b"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.615697 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-542js"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.616355 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m5689"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.617402 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pxbm7"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.618737 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-kjgl8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.619055 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.619660 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.620299 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-p2x8c"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.621614 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-h5cbk"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.622930 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pxbm7"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.623293 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.624097 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-kjgl8"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.625086 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-4nz5q"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.625981 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.626048 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-4nz5q"] Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.644381 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.663395 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.682882 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.703290 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.723173 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.743790 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.765077 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.787632 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.803734 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.823750 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.843851 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.863364 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.883784 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.904123 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.924268 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.944414 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.963696 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 09:38:08 crc kubenswrapper[4854]: I1125 09:38:08.983837 4854 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.002915 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.023443 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.043572 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.063964 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.084534 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.109402 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.144359 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.164151 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.184161 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.202703 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.223657 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.249547 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.264072 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.284222 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.303840 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.323165 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.343386 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.363406 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.383179 4854 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.423272 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.443804 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.463155 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.484548 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.504371 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.522225 4854 request.go:700] Waited for 1.01858592s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.523877 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.543754 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.564030 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.583798 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.604337 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.624196 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.644091 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.664414 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.684050 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.704495 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.724349 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.743279 4854 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.763605 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.784123 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.803325 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.824542 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.844202 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.871940 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.884376 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.903844 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.922972 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.943256 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.963325 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 09:38:09 crc kubenswrapper[4854]: I1125 09:38:09.983341 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.004089 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.013253 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.013634 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.013736 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.013886 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.023736 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.043659 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.063715 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.083883 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.103771 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.123394 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.144037 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.174467 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.183355 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.204110 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.224185 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.257511 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbtlt\" (UniqueName: \"kubernetes.io/projected/2396b594-22b4-4052-b767-54e2aaf1b0dc-kube-api-access-vbtlt\") pod \"cluster-samples-operator-665b6dd947-fjr4q\" (UID: \"2396b594-22b4-4052-b767-54e2aaf1b0dc\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.279531 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxgbn\" (UniqueName: \"kubernetes.io/projected/cc554d84-4ddd-468b-ac0f-b41f2ad5d26f-kube-api-access-mxgbn\") pod \"openshift-config-operator-7777fb866f-cbzc5\" (UID: \"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.300659 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcdnl\" (UniqueName: \"kubernetes.io/projected/44450497-9e24-4702-8614-894197881904-kube-api-access-mcdnl\") pod \"openshift-apiserver-operator-796bbdcf4f-45d9l\" (UID: \"44450497-9e24-4702-8614-894197881904\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.309758 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.319584 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjrsc\" (UniqueName: \"kubernetes.io/projected/1a9dfec2-b952-4e84-9d99-377792feb851-kube-api-access-hjrsc\") pod \"apiserver-76f77b778f-sb8md\" (UID: \"1a9dfec2-b952-4e84-9d99-377792feb851\") " pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.340229 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hzzd\" (UniqueName: \"kubernetes.io/projected/a2d7193d-eb53-446a-a96c-49d28dbbe724-kube-api-access-7hzzd\") pod \"downloads-7954f5f757-jvdld\" (UID: \"a2d7193d-eb53-446a-a96c-49d28dbbe724\") " pod="openshift-console/downloads-7954f5f757-jvdld" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.342781 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-jvdld" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.361432 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jxvg\" (UniqueName: \"kubernetes.io/projected/4190d598-71ee-4d5d-885c-914ba454df27-kube-api-access-8jxvg\") pod \"machine-approver-56656f9798-hsjfc\" (UID: \"4190d598-71ee-4d5d-885c-914ba454df27\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.379309 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjds4\" (UniqueName: \"kubernetes.io/projected/9dab85a1-11f8-45ee-ab81-394ead31aab5-kube-api-access-xjds4\") pod \"oauth-openshift-558db77b4-62dll\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.386076 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.403391 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqjw6\" (UniqueName: \"kubernetes.io/projected/c8bf226e-44f3-494c-b837-b9e8b9f9904d-kube-api-access-rqjw6\") pod \"controller-manager-879f6c89f-srrn8\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.403507 4854 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.423980 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.445002 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.465086 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.483920 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.498873 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5"] Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.504000 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.505014 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:10 crc kubenswrapper[4854]: W1125 09:38:10.511538 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc554d84_4ddd_468b_ac0f_b41f2ad5d26f.slice/crio-cb9a567ed6a6d780bdd7175e07fe414f34657579d07e46c3cc47b2a81e765c12 WatchSource:0}: Error finding container cb9a567ed6a6d780bdd7175e07fe414f34657579d07e46c3cc47b2a81e765c12: Status 404 returned error can't find the container with id cb9a567ed6a6d780bdd7175e07fe414f34657579d07e46c3cc47b2a81e765c12 Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.518347 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-jvdld"] Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.522930 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 09:38:10 crc kubenswrapper[4854]: W1125 09:38:10.525191 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2d7193d_eb53_446a_a96c_49d28dbbe724.slice/crio-5877ad6f0c8dfc475f0e53e209d9676db37cf2745b0185c5f40ffeb2787397a4 WatchSource:0}: Error finding container 5877ad6f0c8dfc475f0e53e209d9676db37cf2745b0185c5f40ffeb2787397a4: Status 404 returned error can't find the container with id 5877ad6f0c8dfc475f0e53e209d9676db37cf2745b0185c5f40ffeb2787397a4 Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.538527 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.542432 4854 request.go:700] Waited for 1.916272983s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/secrets?fieldSelector=metadata.name%3Ddns-dockercfg-jwfmh&limit=500&resourceVersion=0 Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.544960 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.560560 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-62dll"] Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.560603 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.563783 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 09:38:10 crc kubenswrapper[4854]: W1125 09:38:10.564624 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9dab85a1_11f8_45ee_ab81_394ead31aab5.slice/crio-c7dfb0bbf86fd9bd0e9d6c2a07a0c0704826ff5d674c1581a8ebfe9c8495a6d7 WatchSource:0}: Error finding container c7dfb0bbf86fd9bd0e9d6c2a07a0c0704826ff5d674c1581a8ebfe9c8495a6d7: Status 404 returned error can't find the container with id c7dfb0bbf86fd9bd0e9d6c2a07a0c0704826ff5d674c1581a8ebfe9c8495a6d7 Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.584488 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.600370 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.609244 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.634683 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663306 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-encryption-config\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663364 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30a1a0d5-62ce-4910-a239-58d552e12e59-proxy-tls\") pod \"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663388 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-bound-sa-token\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663447 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663470 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dc1611b6-bd24-4ae5-a332-3a336786b9d5-metrics-tls\") pod \"dns-operator-744455d44c-52bvk\" (UID: \"dc1611b6-bd24-4ae5-a332-3a336786b9d5\") " pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663491 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcj2g\" (UniqueName: \"kubernetes.io/projected/7ea5d904-b5da-4a4a-9221-f808841b0052-kube-api-access-jcj2g\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663513 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d606b975-0bde-4cfa-b190-86b7da1df764-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663553 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-client-ca\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: 
\"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663576 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-trusted-ca\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663598 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663725 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663752 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-config\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663776 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxl4g\" (UniqueName: \"kubernetes.io/projected/c06ba819-662b-4eaa-93f8-ea5e462d5aec-kube-api-access-rxl4g\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663811 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-config\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663836 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663859 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64098d81-ada3-4e0d-ac74-3d94b7247437-kube-api-access\") pod 
\"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663925 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30a1a0d5-62ce-4910-a239-58d552e12e59-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663948 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-etcd-client\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.663970 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43b016d4-5802-4d1b-a99f-bc7728d7162f-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664032 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62lvw\" (UniqueName: \"kubernetes.io/projected/43b016d4-5802-4d1b-a99f-bc7728d7162f-kube-api-access-62lvw\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664053 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664077 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e58b8d-5283-40d1-9707-d844963263fe-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664145 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-etcd-ca\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664168 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flhsn\" (UniqueName: 
\"kubernetes.io/projected/b5254b1e-3543-4304-b361-d8419d09548e-kube-api-access-flhsn\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664209 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-tls\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664233 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2756\" (UniqueName: \"kubernetes.io/projected/594e4cd2-32ab-4150-b5b3-4d167c35fb84-kube-api-access-c2756\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664257 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c3e75-c67a-4dcf-b76d-bc1920af0a41-serving-cert\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664284 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/594e4cd2-32ab-4150-b5b3-4d167c35fb84-webhook-cert\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664309 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664330 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664356 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-trusted-ca\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664377 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkdkg\" (UniqueName: \"kubernetes.io/projected/d606b975-0bde-4cfa-b190-86b7da1df764-kube-api-access-hkdkg\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664398 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664427 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-td6m7\" (UniqueName: \"kubernetes.io/projected/5912e701-8be4-4f6b-94a0-6ab69f81cef5-kube-api-access-td6m7\") pod \"multus-admission-controller-857f4d67dd-krv84\" (UID: \"5912e701-8be4-4f6b-94a0-6ab69f81cef5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664451 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664472 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64098d81-ada3-4e0d-ac74-3d94b7247437-config\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664498 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b5254b1e-3543-4304-b361-d8419d09548e-etcd-client\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664522 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/594e4cd2-32ab-4150-b5b3-4d167c35fb84-tmpfs\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664549 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c06ba819-662b-4eaa-93f8-ea5e462d5aec-srv-cert\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664571 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mq9x9\" (UniqueName: \"kubernetes.io/projected/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-kube-api-access-mq9x9\") pod \"console-operator-58897d9998-p2x8c\" (UID: 
\"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664607 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ea5d904-b5da-4a4a-9221-f808841b0052-secret-volume\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664630 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c464b1b6-988b-430b-b6ac-6b5110888de8-config\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664697 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-trusted-ca\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664724 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5912e701-8be4-4f6b-94a0-6ab69f81cef5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-krv84\" (UID: \"5912e701-8be4-4f6b-94a0-6ab69f81cef5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664761 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d606b975-0bde-4cfa-b190-86b7da1df764-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664786 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c464b1b6-988b-430b-b6ac-6b5110888de8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664811 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-metrics-tls\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664844 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/10e58b8d-5283-40d1-9707-d844963263fe-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: 
\"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664866 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64098d81-ada3-4e0d-ac74-3d94b7247437-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664930 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-certificates\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664953 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-serving-cert\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.664977 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c464b1b6-988b-430b-b6ac-6b5110888de8-images\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665011 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlltc\" (UniqueName: \"kubernetes.io/projected/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-kube-api-access-hlltc\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665035 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-serving-cert\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665057 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/594e4cd2-32ab-4150-b5b3-4d167c35fb84-apiservice-cert\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665079 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665138 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-etcd-service-ca\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665177 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnglk\" (UniqueName: \"kubernetes.io/projected/9117eb3b-0187-4957-b851-da9e4c229c8f-kube-api-access-wnglk\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665226 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqknn\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-kube-api-access-zqknn\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665267 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5254b1e-3543-4304-b361-d8419d09548e-serving-cert\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665300 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665323 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-config\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665360 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzp7v\" (UniqueName: \"kubernetes.io/projected/30a1a0d5-62ce-4910-a239-58d552e12e59-kube-api-access-gzp7v\") pod \"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665383 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxv2v\" (UniqueName: \"kubernetes.io/projected/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-kube-api-access-fxv2v\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665403 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c06ba819-662b-4eaa-93f8-ea5e462d5aec-profile-collector-cert\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665462 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665506 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdrwj\" (UniqueName: \"kubernetes.io/projected/dc1611b6-bd24-4ae5-a332-3a336786b9d5-kube-api-access-sdrwj\") pod \"dns-operator-744455d44c-52bvk\" (UID: \"dc1611b6-bd24-4ae5-a332-3a336786b9d5\") " pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.665995 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ea5d904-b5da-4a4a-9221-f808841b0052-config-volume\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666026 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9r7k\" (UniqueName: \"kubernetes.io/projected/c464b1b6-988b-430b-b6ac-6b5110888de8-kube-api-access-z9r7k\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: E1125 09:38:10.666063 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.166046987 +0000 UTC m=+97.019040453 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666111 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666137 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhpmr\" (UniqueName: \"kubernetes.io/projected/243c3e75-c67a-4dcf-b76d-bc1920af0a41-kube-api-access-rhpmr\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666194 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e58b8d-5283-40d1-9707-d844963263fe-config\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666218 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43b016d4-5802-4d1b-a99f-bc7728d7162f-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666243 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43b016d4-5802-4d1b-a99f-bc7728d7162f-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666269 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drjpl\" (UniqueName: \"kubernetes.io/projected/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-kube-api-access-drjpl\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666294 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-audit-policies\") pod 
\"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666315 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.666352 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-audit-dir\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.685972 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.704525 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.724148 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.747124 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.750137 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-sb8md"] Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.764197 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.768401 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769084 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3519fd59-259f-49f7-875d-4080e17ffe6f-images\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769125 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-config\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769151 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-audit-policies\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769175 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-audit-dir\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769195 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769214 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-encryption-config\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769251 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8knk7\" (UniqueName: \"kubernetes.io/projected/10866420-6a3f-42b5-b416-fa3e70f94a20-kube-api-access-8knk7\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769274 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3519fd59-259f-49f7-875d-4080e17ffe6f-proxy-tls\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769299 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30a1a0d5-62ce-4910-a239-58d552e12e59-proxy-tls\") pod \"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769318 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-stats-auth\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.769343 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-bound-sa-token\") pod \"image-registry-697d97f7c8-sd29s\" (UID: 
\"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.770196 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-audit-dir\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: E1125 09:38:10.770286 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.270260324 +0000 UTC m=+97.123253730 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.770928 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-audit-policies\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771017 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771107 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771133 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dc1611b6-bd24-4ae5-a332-3a336786b9d5-metrics-tls\") pod \"dns-operator-744455d44c-52bvk\" (UID: \"dc1611b6-bd24-4ae5-a332-3a336786b9d5\") " pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771156 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcj2g\" (UniqueName: \"kubernetes.io/projected/7ea5d904-b5da-4a4a-9221-f808841b0052-kube-api-access-jcj2g\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771177 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d606b975-0bde-4cfa-b190-86b7da1df764-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771637 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/708f81b5-e3db-4346-9fd5-025ded50d4aa-cert\") pod \"ingress-canary-kjgl8\" (UID: \"708f81b5-e3db-4346-9fd5-025ded50d4aa\") " pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771687 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-client-ca\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771714 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-trusted-ca\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771735 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771765 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxl4g\" (UniqueName: \"kubernetes.io/projected/c06ba819-662b-4eaa-93f8-ea5e462d5aec-kube-api-access-rxl4g\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771788 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771806 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-config\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771828 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: 
\"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-csi-data-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771846 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78qx7\" (UniqueName: \"kubernetes.io/projected/708f81b5-e3db-4346-9fd5-025ded50d4aa-kube-api-access-78qx7\") pod \"ingress-canary-kjgl8\" (UID: \"708f81b5-e3db-4346-9fd5-025ded50d4aa\") " pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771855 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d606b975-0bde-4cfa-b190-86b7da1df764-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771869 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-service-ca\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771888 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-oauth-serving-cert\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771913 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-config\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771933 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771953 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64098d81-ada3-4e0d-ac74-3d94b7247437-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771973 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30a1a0d5-62ce-4910-a239-58d552e12e59-mcc-auth-proxy-config\") pod 
\"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.771996 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b236ed1d-4b1e-4910-9df1-0db7353a28c5-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-chcz8\" (UID: \"b236ed1d-4b1e-4910-9df1-0db7353a28c5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772020 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/268f8834-ad43-4d58-965a-9edddd43ad54-node-bootstrap-token\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772046 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-etcd-client\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772067 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43b016d4-5802-4d1b-a99f-bc7728d7162f-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772090 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62lvw\" (UniqueName: \"kubernetes.io/projected/43b016d4-5802-4d1b-a99f-bc7728d7162f-kube-api-access-62lvw\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772111 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4293c24a-f631-41fb-9065-b157c62cd0d3-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-6c7r5\" (UID: \"4293c24a-f631-41fb-9065-b157c62cd0d3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772133 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772152 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/10e58b8d-5283-40d1-9707-d844963263fe-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772175 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-registration-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772196 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/24be6029-f079-49a0-904e-6f072eafba7e-signing-cabundle\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772226 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jm8p\" (UniqueName: \"kubernetes.io/projected/c49abfb3-cae2-48dc-93ef-0fb8d3853caf-kube-api-access-8jm8p\") pod \"migrator-59844c95c7-452s8\" (UID: \"c49abfb3-cae2-48dc-93ef-0fb8d3853caf\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772248 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-etcd-ca\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772268 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flhsn\" (UniqueName: \"kubernetes.io/projected/b5254b1e-3543-4304-b361-d8419d09548e-kube-api-access-flhsn\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772292 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwvkl\" (UniqueName: \"kubernetes.io/projected/b236ed1d-4b1e-4910-9df1-0db7353a28c5-kube-api-access-mwvkl\") pod \"control-plane-machine-set-operator-78cbb6b69f-chcz8\" (UID: \"b236ed1d-4b1e-4910-9df1-0db7353a28c5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772316 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00a6c4d0-b375-4cf1-af7f-a9a27621d050-serving-cert\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772337 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-trusted-ca-bundle\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772355 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-metrics-certs\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772381 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-tls\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772403 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfxmc\" (UniqueName: \"kubernetes.io/projected/1e52d13d-15dd-437f-8d2d-88709419d1f2-kube-api-access-rfxmc\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772424 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d22cd363-91c5-46e0-af39-0f08466fbc92-config-volume\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772444 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/065a1559-77c2-4340-a110-4835055501a7-serving-cert\") pod \"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772470 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2756\" (UniqueName: \"kubernetes.io/projected/594e4cd2-32ab-4150-b5b3-4d167c35fb84-kube-api-access-c2756\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772489 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-service-ca-bundle\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772511 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q947b\" (UniqueName: \"kubernetes.io/projected/065a1559-77c2-4340-a110-4835055501a7-kube-api-access-q947b\") pod \"service-ca-operator-777779d784-ln89b\" (UID: 
\"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772522 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-client-ca\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772533 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c3e75-c67a-4dcf-b76d-bc1920af0a41-serving-cert\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772555 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf2jv\" (UniqueName: \"kubernetes.io/projected/4293c24a-f631-41fb-9065-b157c62cd0d3-kube-api-access-sf2jv\") pod \"package-server-manager-789f6589d5-6c7r5\" (UID: \"4293c24a-f631-41fb-9065-b157c62cd0d3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772575 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/594e4cd2-32ab-4150-b5b3-4d167c35fb84-webhook-cert\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772595 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/24be6029-f079-49a0-904e-6f072eafba7e-signing-key\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772616 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkdkg\" (UniqueName: \"kubernetes.io/projected/d606b975-0bde-4cfa-b190-86b7da1df764-kube-api-access-hkdkg\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772640 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772660 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-trusted-ca\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772737 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-oauth-config\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772766 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772789 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-td6m7\" (UniqueName: \"kubernetes.io/projected/5912e701-8be4-4f6b-94a0-6ab69f81cef5-kube-api-access-td6m7\") pod \"multus-admission-controller-857f4d67dd-krv84\" (UID: \"5912e701-8be4-4f6b-94a0-6ab69f81cef5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772811 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772845 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64098d81-ada3-4e0d-ac74-3d94b7247437-config\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772871 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b5254b1e-3543-4304-b361-d8419d09548e-etcd-client\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772890 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/594e4cd2-32ab-4150-b5b3-4d167c35fb84-tmpfs\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.772989 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c06ba819-662b-4eaa-93f8-ea5e462d5aec-srv-cert\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773010 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-mq9x9\" (UniqueName: \"kubernetes.io/projected/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-kube-api-access-mq9x9\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773033 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ea5d904-b5da-4a4a-9221-f808841b0052-secret-volume\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773055 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c464b1b6-988b-430b-b6ac-6b5110888de8-config\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773077 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d22cd363-91c5-46e0-af39-0f08466fbc92-metrics-tls\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773104 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-trusted-ca\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773126 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5912e701-8be4-4f6b-94a0-6ab69f81cef5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-krv84\" (UID: \"5912e701-8be4-4f6b-94a0-6ab69f81cef5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773148 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46cgj\" (UniqueName: \"kubernetes.io/projected/3519fd59-259f-49f7-875d-4080e17ffe6f-kube-api-access-46cgj\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773171 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d606b975-0bde-4cfa-b190-86b7da1df764-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773752 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30a1a0d5-62ce-4910-a239-58d552e12e59-proxy-tls\") pod 
\"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773785 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-config\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.773882 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-trusted-ca\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.774234 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.774618 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/594e4cd2-32ab-4150-b5b3-4d167c35fb84-tmpfs\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.774759 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-encryption-config\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.775082 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-config\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.775153 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.775905 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c464b1b6-988b-430b-b6ac-6b5110888de8-config\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.776082 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" 
(UniqueName: \"kubernetes.io/configmap/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-trusted-ca\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.776988 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30a1a0d5-62ce-4910-a239-58d552e12e59-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.777425 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64098d81-ada3-4e0d-ac74-3d94b7247437-config\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.777859 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.778225 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.778489 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-trusted-ca\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.778953 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779022 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dc1611b6-bd24-4ae5-a332-3a336786b9d5-metrics-tls\") pod \"dns-operator-744455d44c-52bvk\" (UID: \"dc1611b6-bd24-4ae5-a332-3a336786b9d5\") " pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779514 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c464b1b6-988b-430b-b6ac-6b5110888de8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: 
\"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779549 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/268f8834-ad43-4d58-965a-9edddd43ad54-certs\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779569 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/065a1559-77c2-4340-a110-4835055501a7-config\") pod \"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779610 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-698km\" (UniqueName: \"kubernetes.io/projected/d22cd363-91c5-46e0-af39-0f08466fbc92-kube-api-access-698km\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779636 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-metrics-tls\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779692 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmwnm\" (UniqueName: \"kubernetes.io/projected/268f8834-ad43-4d58-965a-9edddd43ad54-kube-api-access-cmwnm\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779711 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/10e58b8d-5283-40d1-9707-d844963263fe-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779746 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64098d81-ada3-4e0d-ac74-3d94b7247437-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.779763 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-mountpoint-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 
09:38:10.779779 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-default-certificate\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780321 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-tls\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780480 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b5254b1e-3543-4304-b361-d8419d09548e-etcd-client\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780555 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-plugins-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780619 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10866420-6a3f-42b5-b416-fa3e70f94a20-profile-collector-cert\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780694 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-certificates\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780725 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-serving-cert\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780751 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlltc\" (UniqueName: \"kubernetes.io/projected/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-kube-api-access-hlltc\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780803 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c464b1b6-988b-430b-b6ac-6b5110888de8-images\") pod \"machine-api-operator-5694c8668f-vhjzg\" 
(UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780823 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-serving-cert\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780850 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/594e4cd2-32ab-4150-b5b3-4d167c35fb84-apiservice-cert\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780867 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780887 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.780954 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.781007 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-etcd-ca\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.782010 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-certificates\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.782525 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" 
Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.782571 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-etcd-service-ca\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783156 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b5254b1e-3543-4304-b361-d8419d09548e-etcd-service-ca\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783198 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnglk\" (UniqueName: \"kubernetes.io/projected/9117eb3b-0187-4957-b851-da9e4c229c8f-kube-api-access-wnglk\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783224 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10866420-6a3f-42b5-b416-fa3e70f94a20-srv-cert\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783254 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqknn\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-kube-api-access-zqknn\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783371 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/594e4cd2-32ab-4150-b5b3-4d167c35fb84-webhook-cert\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783419 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783444 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5254b1e-3543-4304-b361-d8419d09548e-serving-cert\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783470 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-config\") pod 
\"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783531 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzp7v\" (UniqueName: \"kubernetes.io/projected/30a1a0d5-62ce-4910-a239-58d552e12e59-kube-api-access-gzp7v\") pod \"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783552 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxv2v\" (UniqueName: \"kubernetes.io/projected/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-kube-api-access-fxv2v\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783578 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c06ba819-662b-4eaa-93f8-ea5e462d5aec-profile-collector-cert\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783598 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-socket-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783662 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-console-config\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783707 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783741 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdrwj\" (UniqueName: \"kubernetes.io/projected/dc1611b6-bd24-4ae5-a332-3a336786b9d5-kube-api-access-sdrwj\") pod \"dns-operator-744455d44c-52bvk\" (UID: \"dc1611b6-bd24-4ae5-a332-3a336786b9d5\") " pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783759 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ea5d904-b5da-4a4a-9221-f808841b0052-config-volume\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783775 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9r7k\" (UniqueName: \"kubernetes.io/projected/c464b1b6-988b-430b-b6ac-6b5110888de8-kube-api-access-z9r7k\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783915 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmf52\" (UniqueName: \"kubernetes.io/projected/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-kube-api-access-gmf52\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783950 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-serving-cert\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783975 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnm2q\" (UniqueName: \"kubernetes.io/projected/24be6029-f079-49a0-904e-6f072eafba7e-kube-api-access-mnm2q\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.783996 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1e52d13d-15dd-437f-8d2d-88709419d1f2-service-ca-bundle\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784022 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784051 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhpmr\" (UniqueName: \"kubernetes.io/projected/243c3e75-c67a-4dcf-b76d-bc1920af0a41-kube-api-access-rhpmr\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784078 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3519fd59-259f-49f7-875d-4080e17ffe6f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 
25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784105 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e58b8d-5283-40d1-9707-d844963263fe-config\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784131 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fzcp\" (UniqueName: \"kubernetes.io/projected/00a6c4d0-b375-4cf1-af7f-a9a27621d050-kube-api-access-4fzcp\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784156 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43b016d4-5802-4d1b-a99f-bc7728d7162f-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784182 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43b016d4-5802-4d1b-a99f-bc7728d7162f-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784206 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzngf\" (UniqueName: \"kubernetes.io/projected/97e545f8-81c2-400b-a339-b2b3a1958492-kube-api-access-wzngf\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.784897 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-config\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.785029 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/594e4cd2-32ab-4150-b5b3-4d167c35fb84-apiservice-cert\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.785202 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c06ba819-662b-4eaa-93f8-ea5e462d5aec-srv-cert\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.785988 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.786940 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-etcd-client\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.787002 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c464b1b6-988b-430b-b6ac-6b5110888de8-images\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.787177 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drjpl\" (UniqueName: \"kubernetes.io/projected/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-kube-api-access-drjpl\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.787757 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e58b8d-5283-40d1-9707-d844963263fe-config\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: E1125 09:38:10.788549 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.2885328 +0000 UTC m=+97.141526256 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.788603 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ea5d904-b5da-4a4a-9221-f808841b0052-config-volume\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.788909 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43b016d4-5802-4d1b-a99f-bc7728d7162f-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.789376 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e58b8d-5283-40d1-9707-d844963263fe-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.789460 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c3e75-c67a-4dcf-b76d-bc1920af0a41-serving-cert\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.789556 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5254b1e-3543-4304-b361-d8419d09548e-serving-cert\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.789851 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d606b975-0bde-4cfa-b190-86b7da1df764-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.789982 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ea5d904-b5da-4a4a-9221-f808841b0052-secret-volume\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.790365 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-metrics-tls\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.790787 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5912e701-8be4-4f6b-94a0-6ab69f81cef5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-krv84\" (UID: \"5912e701-8be4-4f6b-94a0-6ab69f81cef5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.791357 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64098d81-ada3-4e0d-ac74-3d94b7247437-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.792491 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-serving-cert\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.792609 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c06ba819-662b-4eaa-93f8-ea5e462d5aec-profile-collector-cert\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.792691 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43b016d4-5802-4d1b-a99f-bc7728d7162f-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.792892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c464b1b6-988b-430b-b6ac-6b5110888de8-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.792991 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-serving-cert\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.793836 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sd29s\" (UID: 
\"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.821505 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-bound-sa-token\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.830848 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l"] Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.841794 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxl4g\" (UniqueName: \"kubernetes.io/projected/c06ba819-662b-4eaa-93f8-ea5e462d5aec-kube-api-access-rxl4g\") pod \"olm-operator-6b444d44fb-r5wdv\" (UID: \"c06ba819-662b-4eaa-93f8-ea5e462d5aec\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:10 crc kubenswrapper[4854]: W1125 09:38:10.858496 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44450497_9e24_4702_8614_894197881904.slice/crio-badcc0ece09f93e8f9b095dde4407e8320ecd74723345dc69ace3233da53d7c1 WatchSource:0}: Error finding container badcc0ece09f93e8f9b095dde4407e8320ecd74723345dc69ace3233da53d7c1: Status 404 returned error can't find the container with id badcc0ece09f93e8f9b095dde4407e8320ecd74723345dc69ace3233da53d7c1 Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.861151 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8d4de16b-3a82-47e6-aab5-e41ab8f871fb-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-cd58s\" (UID: \"8d4de16b-3a82-47e6-aab5-e41ab8f871fb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.880376 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcj2g\" (UniqueName: \"kubernetes.io/projected/7ea5d904-b5da-4a4a-9221-f808841b0052-kube-api-access-jcj2g\") pod \"collect-profiles-29401050-jck8z\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.888927 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889298 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10866420-6a3f-42b5-b416-fa3e70f94a20-profile-collector-cert\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889416 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889459 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10866420-6a3f-42b5-b416-fa3e70f94a20-srv-cert\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889540 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-socket-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889589 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-console-config\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889655 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmf52\" (UniqueName: \"kubernetes.io/projected/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-kube-api-access-gmf52\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889701 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-serving-cert\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889730 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnm2q\" (UniqueName: \"kubernetes.io/projected/24be6029-f079-49a0-904e-6f072eafba7e-kube-api-access-mnm2q\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889758 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1e52d13d-15dd-437f-8d2d-88709419d1f2-service-ca-bundle\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889792 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3519fd59-259f-49f7-875d-4080e17ffe6f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc 
kubenswrapper[4854]: I1125 09:38:10.889819 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fzcp\" (UniqueName: \"kubernetes.io/projected/00a6c4d0-b375-4cf1-af7f-a9a27621d050-kube-api-access-4fzcp\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889851 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzngf\" (UniqueName: \"kubernetes.io/projected/97e545f8-81c2-400b-a339-b2b3a1958492-kube-api-access-wzngf\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889888 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3519fd59-259f-49f7-875d-4080e17ffe6f-images\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.889913 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-config\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890053 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8knk7\" (UniqueName: \"kubernetes.io/projected/10866420-6a3f-42b5-b416-fa3e70f94a20-kube-api-access-8knk7\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890092 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3519fd59-259f-49f7-875d-4080e17ffe6f-proxy-tls\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890122 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-stats-auth\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890150 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/708f81b5-e3db-4346-9fd5-025ded50d4aa-cert\") pod \"ingress-canary-kjgl8\" (UID: \"708f81b5-e3db-4346-9fd5-025ded50d4aa\") " pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890185 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-csi-data-dir\") pod 
\"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890210 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78qx7\" (UniqueName: \"kubernetes.io/projected/708f81b5-e3db-4346-9fd5-025ded50d4aa-kube-api-access-78qx7\") pod \"ingress-canary-kjgl8\" (UID: \"708f81b5-e3db-4346-9fd5-025ded50d4aa\") " pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890236 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-service-ca\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890270 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-oauth-serving-cert\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890304 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b236ed1d-4b1e-4910-9df1-0db7353a28c5-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-chcz8\" (UID: \"b236ed1d-4b1e-4910-9df1-0db7353a28c5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890334 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/268f8834-ad43-4d58-965a-9edddd43ad54-node-bootstrap-token\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890357 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4293c24a-f631-41fb-9065-b157c62cd0d3-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-6c7r5\" (UID: \"4293c24a-f631-41fb-9065-b157c62cd0d3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890409 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-registration-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890440 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/24be6029-f079-49a0-904e-6f072eafba7e-signing-cabundle\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890490 
4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00a6c4d0-b375-4cf1-af7f-a9a27621d050-serving-cert\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890519 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jm8p\" (UniqueName: \"kubernetes.io/projected/c49abfb3-cae2-48dc-93ef-0fb8d3853caf-kube-api-access-8jm8p\") pod \"migrator-59844c95c7-452s8\" (UID: \"c49abfb3-cae2-48dc-93ef-0fb8d3853caf\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890559 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwvkl\" (UniqueName: \"kubernetes.io/projected/b236ed1d-4b1e-4910-9df1-0db7353a28c5-kube-api-access-mwvkl\") pod \"control-plane-machine-set-operator-78cbb6b69f-chcz8\" (UID: \"b236ed1d-4b1e-4910-9df1-0db7353a28c5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890587 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-trusted-ca-bundle\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890615 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-metrics-certs\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890643 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d22cd363-91c5-46e0-af39-0f08466fbc92-config-volume\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890690 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfxmc\" (UniqueName: \"kubernetes.io/projected/1e52d13d-15dd-437f-8d2d-88709419d1f2-kube-api-access-rfxmc\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890724 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-service-ca-bundle\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890752 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/065a1559-77c2-4340-a110-4835055501a7-serving-cert\") pod 
\"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890794 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q947b\" (UniqueName: \"kubernetes.io/projected/065a1559-77c2-4340-a110-4835055501a7-kube-api-access-q947b\") pod \"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890822 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf2jv\" (UniqueName: \"kubernetes.io/projected/4293c24a-f631-41fb-9065-b157c62cd0d3-kube-api-access-sf2jv\") pod \"package-server-manager-789f6589d5-6c7r5\" (UID: \"4293c24a-f631-41fb-9065-b157c62cd0d3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890856 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/24be6029-f079-49a0-904e-6f072eafba7e-signing-key\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890901 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-oauth-config\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.890972 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d22cd363-91c5-46e0-af39-0f08466fbc92-metrics-tls\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891009 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46cgj\" (UniqueName: \"kubernetes.io/projected/3519fd59-259f-49f7-875d-4080e17ffe6f-kube-api-access-46cgj\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891046 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/268f8834-ad43-4d58-965a-9edddd43ad54-certs\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891073 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/065a1559-77c2-4340-a110-4835055501a7-config\") pod \"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891105 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-698km\" (UniqueName: \"kubernetes.io/projected/d22cd363-91c5-46e0-af39-0f08466fbc92-kube-api-access-698km\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891138 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmwnm\" (UniqueName: \"kubernetes.io/projected/268f8834-ad43-4d58-965a-9edddd43ad54-kube-api-access-cmwnm\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891177 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-mountpoint-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891209 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-default-certificate\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891240 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-plugins-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.891643 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-plugins-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: E1125 09:38:10.894295 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.39426924 +0000 UTC m=+97.247262616 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.895585 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3519fd59-259f-49f7-875d-4080e17ffe6f-images\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.895613 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-csi-data-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.895763 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-registration-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.896951 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/24be6029-f079-49a0-904e-6f072eafba7e-signing-cabundle\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.897013 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.897381 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/065a1559-77c2-4340-a110-4835055501a7-config\") pod \"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.897989 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10866420-6a3f-42b5-b416-fa3e70f94a20-profile-collector-cert\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.898216 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-mountpoint-dir\") pod 
\"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.898689 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3519fd59-259f-49f7-875d-4080e17ffe6f-proxy-tls\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.899000 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4293c24a-f631-41fb-9065-b157c62cd0d3-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-6c7r5\" (UID: \"4293c24a-f631-41fb-9065-b157c62cd0d3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.899195 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-config\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.899773 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-socket-dir\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.900442 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-td6m7\" (UniqueName: \"kubernetes.io/projected/5912e701-8be4-4f6b-94a0-6ab69f81cef5-kube-api-access-td6m7\") pod \"multus-admission-controller-857f4d67dd-krv84\" (UID: \"5912e701-8be4-4f6b-94a0-6ab69f81cef5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.900565 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/268f8834-ad43-4d58-965a-9edddd43ad54-certs\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901100 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-console-config\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901116 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-stats-auth\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901127 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-trusted-ca-bundle\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901288 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10866420-6a3f-42b5-b416-fa3e70f94a20-srv-cert\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901445 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-service-ca\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901659 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-oauth-serving-cert\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901858 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-oauth-config\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.902317 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/708f81b5-e3db-4346-9fd5-025ded50d4aa-cert\") pod \"ingress-canary-kjgl8\" (UID: \"708f81b5-e3db-4346-9fd5-025ded50d4aa\") " pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.902338 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-default-certificate\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.901804 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3519fd59-259f-49f7-875d-4080e17ffe6f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.902787 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d22cd363-91c5-46e0-af39-0f08466fbc92-config-volume\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.903153 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/1e52d13d-15dd-437f-8d2d-88709419d1f2-service-ca-bundle\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.903258 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00a6c4d0-b375-4cf1-af7f-a9a27621d050-serving-cert\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.903758 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-serving-cert\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.904225 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d22cd363-91c5-46e0-af39-0f08466fbc92-metrics-tls\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.905019 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b236ed1d-4b1e-4910-9df1-0db7353a28c5-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-chcz8\" (UID: \"b236ed1d-4b1e-4910-9df1-0db7353a28c5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.907312 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00a6c4d0-b375-4cf1-af7f-a9a27621d050-service-ca-bundle\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.907792 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/24be6029-f079-49a0-904e-6f072eafba7e-signing-key\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.907938 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e52d13d-15dd-437f-8d2d-88709419d1f2-metrics-certs\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.909037 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/065a1559-77c2-4340-a110-4835055501a7-serving-cert\") pod \"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.909505 
4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/268f8834-ad43-4d58-965a-9edddd43ad54-node-bootstrap-token\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.922760 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2756\" (UniqueName: \"kubernetes.io/projected/594e4cd2-32ab-4150-b5b3-4d167c35fb84-kube-api-access-c2756\") pod \"packageserver-d55dfcdfc-6rfsb\" (UID: \"594e4cd2-32ab-4150-b5b3-4d167c35fb84\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.938324 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64098d81-ada3-4e0d-ac74-3d94b7247437-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bdb45\" (UID: \"64098d81-ada3-4e0d-ac74-3d94b7247437\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.955964 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.977463 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkdkg\" (UniqueName: \"kubernetes.io/projected/d606b975-0bde-4cfa-b190-86b7da1df764-kube-api-access-hkdkg\") pod \"kube-storage-version-migrator-operator-b67b599dd-q7sg8\" (UID: \"d606b975-0bde-4cfa-b190-86b7da1df764\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.992045 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q"] Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.993605 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:10 crc kubenswrapper[4854]: E1125 09:38:10.993993 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.49397624 +0000 UTC m=+97.346969616 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:10 crc kubenswrapper[4854]: I1125 09:38:10.995543 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-srrn8"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.006493 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62lvw\" (UniqueName: \"kubernetes.io/projected/43b016d4-5802-4d1b-a99f-bc7728d7162f-kube-api-access-62lvw\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.009237 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.017749 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43b016d4-5802-4d1b-a99f-bc7728d7162f-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-nqm2j\" (UID: \"43b016d4-5802-4d1b-a99f-bc7728d7162f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:11 crc kubenswrapper[4854]: W1125 09:38:11.021178 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8bf226e_44f3_494c_b837_b9e8b9f9904d.slice/crio-e0f04d7ac1e9c5d5744ceb54b828f3f4179789027a73fd820d67d36fb527e121 WatchSource:0}: Error finding container e0f04d7ac1e9c5d5744ceb54b828f3f4179789027a73fd820d67d36fb527e121: Status 404 returned error can't find the container with id e0f04d7ac1e9c5d5744ceb54b828f3f4179789027a73fd820d67d36fb527e121 Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.038136 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/10e58b8d-5283-40d1-9707-d844963263fe-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rlq4k\" (UID: \"10e58b8d-5283-40d1-9707-d844963263fe\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.042020 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.050203 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.059275 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlltc\" (UniqueName: \"kubernetes.io/projected/f34856c6-ef13-4ff0-95b4-8a9f6b88b729-kube-api-access-hlltc\") pod \"apiserver-7bbb656c7d-5hvjb\" (UID: \"f34856c6-ef13-4ff0-95b4-8a9f6b88b729\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.062301 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.069340 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.075405 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.078510 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnglk\" (UniqueName: \"kubernetes.io/projected/9117eb3b-0187-4957-b851-da9e4c229c8f-kube-api-access-wnglk\") pod \"marketplace-operator-79b997595-mz9k6\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.095473 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.095652 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.595620043 +0000 UTC m=+97.448613419 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.095854 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.096493 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:38:11.596483948 +0000 UTC m=+97.449477324 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.099713 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqknn\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-kube-api-access-zqknn\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.105418 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.125224 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9r7k\" (UniqueName: \"kubernetes.io/projected/c464b1b6-988b-430b-b6ac-6b5110888de8-kube-api-access-z9r7k\") pod \"machine-api-operator-5694c8668f-vhjzg\" (UID: \"c464b1b6-988b-430b-b6ac-6b5110888de8\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.142950 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flhsn\" (UniqueName: \"kubernetes.io/projected/b5254b1e-3543-4304-b361-d8419d09548e-kube-api-access-flhsn\") pod \"etcd-operator-b45778765-mvn6l\" (UID: \"b5254b1e-3543-4304-b361-d8419d09548e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.160687 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mq9x9\" (UniqueName: \"kubernetes.io/projected/a4854090-19c4-4d8a-9023-1f3ae3fac5d5-kube-api-access-mq9x9\") pod \"console-operator-58897d9998-p2x8c\" (UID: \"a4854090-19c4-4d8a-9023-1f3ae3fac5d5\") " pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.183177 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.190631 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzp7v\" (UniqueName: \"kubernetes.io/projected/30a1a0d5-62ce-4910-a239-58d552e12e59-kube-api-access-gzp7v\") pod \"machine-config-controller-84d6567774-542js\" (UID: \"30a1a0d5-62ce-4910-a239-58d552e12e59\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.197437 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 
09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.197867 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.697851384 +0000 UTC m=+97.550844760 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.205254 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxv2v\" (UniqueName: \"kubernetes.io/projected/eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b-kube-api-access-fxv2v\") pod \"ingress-operator-5b745b69d9-4nzhc\" (UID: \"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:11 crc kubenswrapper[4854]: W1125 09:38:11.219607 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd606b975_0bde_4cfa_b190_86b7da1df764.slice/crio-4e10a9fbd06b64fa9ef99d05af03b88068eea023398ad8057909ca2e1ec5acc9 WatchSource:0}: Error finding container 4e10a9fbd06b64fa9ef99d05af03b88068eea023398ad8057909ca2e1ec5acc9: Status 404 returned error can't find the container with id 4e10a9fbd06b64fa9ef99d05af03b88068eea023398ad8057909ca2e1ec5acc9 Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.221113 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.241303 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdrwj\" (UniqueName: \"kubernetes.io/projected/dc1611b6-bd24-4ae5-a332-3a336786b9d5-kube-api-access-sdrwj\") pod \"dns-operator-744455d44c-52bvk\" (UID: \"dc1611b6-bd24-4ae5-a332-3a336786b9d5\") " pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.278862 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhpmr\" (UniqueName: \"kubernetes.io/projected/243c3e75-c67a-4dcf-b76d-bc1920af0a41-kube-api-access-rhpmr\") pod \"route-controller-manager-6576b87f9c-6qzzx\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.279851 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drjpl\" (UniqueName: \"kubernetes.io/projected/eaeeed45-4bb5-446f-8afb-67ae1c8532c8-kube-api-access-drjpl\") pod \"openshift-controller-manager-operator-756b6f6bc6-2kdq6\" (UID: \"eaeeed45-4bb5-446f-8afb-67ae1c8532c8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.292045 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.299018 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.299483 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.799469038 +0000 UTC m=+97.652462414 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.300773 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.301531 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q947b\" (UniqueName: \"kubernetes.io/projected/065a1559-77c2-4340-a110-4835055501a7-kube-api-access-q947b\") pod \"service-ca-operator-777779d784-ln89b\" (UID: \"065a1559-77c2-4340-a110-4835055501a7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.323898 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.328515 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.334821 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.338969 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmwnm\" (UniqueName: \"kubernetes.io/projected/268f8834-ad43-4d58-965a-9edddd43ad54-kube-api-access-cmwnm\") pod \"machine-config-server-vfjfs\" (UID: \"268f8834-ad43-4d58-965a-9edddd43ad54\") " pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.343032 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-698km\" (UniqueName: \"kubernetes.io/projected/d22cd363-91c5-46e0-af39-0f08466fbc92-kube-api-access-698km\") pod \"dns-default-4nz5q\" (UID: \"d22cd363-91c5-46e0-af39-0f08466fbc92\") " pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.355995 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.357406 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf2jv\" (UniqueName: \"kubernetes.io/projected/4293c24a-f631-41fb-9065-b157c62cd0d3-kube-api-access-sf2jv\") pod \"package-server-manager-789f6589d5-6c7r5\" (UID: \"4293c24a-f631-41fb-9065-b157c62cd0d3\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.382459 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.383150 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78qx7\" (UniqueName: \"kubernetes.io/projected/708f81b5-e3db-4346-9fd5-025ded50d4aa-kube-api-access-78qx7\") pod \"ingress-canary-kjgl8\" (UID: \"708f81b5-e3db-4346-9fd5-025ded50d4aa\") " pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.400193 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.400612 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:11.900591967 +0000 UTC m=+97.753585343 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.400729 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.402999 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8knk7\" (UniqueName: \"kubernetes.io/projected/10866420-6a3f-42b5-b416-fa3e70f94a20-kube-api-access-8knk7\") pod \"catalog-operator-68c6474976-hm6rl\" (UID: \"10866420-6a3f-42b5-b416-fa3e70f94a20\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.411866 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.417654 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.425089 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.441653 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jm8p\" (UniqueName: \"kubernetes.io/projected/c49abfb3-cae2-48dc-93ef-0fb8d3853caf-kube-api-access-8jm8p\") pod \"migrator-59844c95c7-452s8\" (UID: \"c49abfb3-cae2-48dc-93ef-0fb8d3853caf\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.444752 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwvkl\" (UniqueName: \"kubernetes.io/projected/b236ed1d-4b1e-4910-9df1-0db7353a28c5-kube-api-access-mwvkl\") pod \"control-plane-machine-set-operator-78cbb6b69f-chcz8\" (UID: \"b236ed1d-4b1e-4910-9df1-0db7353a28c5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.448552 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.458430 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46cgj\" (UniqueName: \"kubernetes.io/projected/3519fd59-259f-49f7-875d-4080e17ffe6f-kube-api-access-46cgj\") pod \"machine-config-operator-74547568cd-8q9m5\" (UID: \"3519fd59-259f-49f7-875d-4080e17ffe6f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.468341 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.479310 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.486975 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.487844 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmf52\" (UniqueName: \"kubernetes.io/projected/4b64fc1e-475a-4d69-a7ac-a23a7b5a7909-kube-api-access-gmf52\") pod \"csi-hostpathplugin-pxbm7\" (UID: \"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909\") " pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.504980 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.505497 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.005476712 +0000 UTC m=+97.858470098 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.529113 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fzcp\" (UniqueName: \"kubernetes.io/projected/00a6c4d0-b375-4cf1-af7f-a9a27621d050-kube-api-access-4fzcp\") pod \"authentication-operator-69f744f599-t99fd\" (UID: \"00a6c4d0-b375-4cf1-af7f-a9a27621d050\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.530412 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzngf\" (UniqueName: \"kubernetes.io/projected/97e545f8-81c2-400b-a339-b2b3a1958492-kube-api-access-wzngf\") pod \"console-f9d7485db-m5689\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.537885 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vfjfs" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.541792 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-jvdld" event={"ID":"a2d7193d-eb53-446a-a96c-49d28dbbe724","Type":"ContainerStarted","Data":"9e0a851e14a32e48b9ccef7463ea9e957f721807f4f7a1edb5014d1e8929a9aa"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.541842 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-jvdld" event={"ID":"a2d7193d-eb53-446a-a96c-49d28dbbe724","Type":"ContainerStarted","Data":"5877ad6f0c8dfc475f0e53e209d9676db37cf2745b0185c5f40ffeb2787397a4"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.542350 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfxmc\" (UniqueName: \"kubernetes.io/projected/1e52d13d-15dd-437f-8d2d-88709419d1f2-kube-api-access-rfxmc\") pod \"router-default-5444994796-vc2n9\" (UID: \"1e52d13d-15dd-437f-8d2d-88709419d1f2\") " pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.543875 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-jvdld" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.550204 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.550618 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" event={"ID":"2396b594-22b4-4052-b767-54e2aaf1b0dc","Type":"ContainerStarted","Data":"d46bca151ead795c477e738b7e7574cb49f967b1e7b604b196d84a58a8d39923"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.550650 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" event={"ID":"2396b594-22b4-4052-b767-54e2aaf1b0dc","Type":"ContainerStarted","Data":"9bb741837eca7882a3b83e52696fcd8fa5bb7ead281256ab1ee6750986786071"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.551608 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.552043 4854 patch_prober.go:28] interesting pod/downloads-7954f5f757-jvdld container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.552075 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jvdld" podUID="a2d7193d-eb53-446a-a96c-49d28dbbe724" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.552832 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" event={"ID":"44450497-9e24-4702-8614-894197881904","Type":"ContainerStarted","Data":"be5134bbf7b572eec8f6ec0539bca33f6957c15019f5e45f3accbe2492023012"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.552893 4854 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" event={"ID":"44450497-9e24-4702-8614-894197881904","Type":"ContainerStarted","Data":"badcc0ece09f93e8f9b095dde4407e8320ecd74723345dc69ace3233da53d7c1"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.561334 4854 generic.go:334] "Generic (PLEG): container finished" podID="1a9dfec2-b952-4e84-9d99-377792feb851" containerID="f32659d8d78159fe1dad127d38f669c5218308c44d3db7f66009eb711b0855cc" exitCode=0 Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.561536 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" event={"ID":"1a9dfec2-b952-4e84-9d99-377792feb851","Type":"ContainerDied","Data":"f32659d8d78159fe1dad127d38f669c5218308c44d3db7f66009eb711b0855cc"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.561581 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" event={"ID":"1a9dfec2-b952-4e84-9d99-377792feb851","Type":"ContainerStarted","Data":"05de777c0ecb348ea28fc7d1088573eded286254ba5023c707391274482aa4f1"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.564284 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-kjgl8" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.566622 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" event={"ID":"9dab85a1-11f8-45ee-ab81-394ead31aab5","Type":"ContainerStarted","Data":"7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.566703 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" event={"ID":"9dab85a1-11f8-45ee-ab81-394ead31aab5","Type":"ContainerStarted","Data":"c7dfb0bbf86fd9bd0e9d6c2a07a0c0704826ff5d674c1581a8ebfe9c8495a6d7"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.567250 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-krv84"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.568565 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.568737 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.570097 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnm2q\" (UniqueName: \"kubernetes.io/projected/24be6029-f079-49a0-904e-6f072eafba7e-kube-api-access-mnm2q\") pod \"service-ca-9c57cc56f-h5cbk\" (UID: \"24be6029-f079-49a0-904e-6f072eafba7e\") " pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.571230 4854 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-62dll container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" start-of-body= Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.571268 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" podUID="9dab85a1-11f8-45ee-ab81-394ead31aab5" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.573466 4854 generic.go:334] "Generic (PLEG): container finished" podID="cc554d84-4ddd-468b-ac0f-b41f2ad5d26f" containerID="e6bf2c18caff8261e4d172a52a6a65522cfd6785591f392f85235c17203e2f8c" exitCode=0 Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.573516 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" event={"ID":"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f","Type":"ContainerDied","Data":"e6bf2c18caff8261e4d172a52a6a65522cfd6785591f392f85235c17203e2f8c"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.573537 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" event={"ID":"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f","Type":"ContainerStarted","Data":"cb9a567ed6a6d780bdd7175e07fe414f34657579d07e46c3cc47b2a81e765c12"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.577432 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" event={"ID":"4190d598-71ee-4d5d-885c-914ba454df27","Type":"ContainerStarted","Data":"187775f0c367d43d139d45d8072cfa5b6a8622d379780a8a7b0fd56b64c85704"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.577474 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" event={"ID":"4190d598-71ee-4d5d-885c-914ba454df27","Type":"ContainerStarted","Data":"5906c4b9cc1a0bc1ef672c8a66875eab869ed3c73608d907c23f36ef1a631585"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.577485 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" event={"ID":"4190d598-71ee-4d5d-885c-914ba454df27","Type":"ContainerStarted","Data":"6a84af17abad7c39a56a342bd7d0c7572860d31f3282aa15e497ceddd270e650"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.580373 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" 
event={"ID":"d606b975-0bde-4cfa-b190-86b7da1df764","Type":"ContainerStarted","Data":"4e10a9fbd06b64fa9ef99d05af03b88068eea023398ad8057909ca2e1ec5acc9"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.608891 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.609972 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.109303908 +0000 UTC m=+97.962297284 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.624192 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" event={"ID":"c8bf226e-44f3-494c-b837-b9e8b9f9904d","Type":"ContainerStarted","Data":"ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533"} Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.624219 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" event={"ID":"c8bf226e-44f3-494c-b837-b9e8b9f9904d","Type":"ContainerStarted","Data":"e0f04d7ac1e9c5d5744ceb54b828f3f4179789027a73fd820d67d36fb527e121"} Nov 25 09:38:11 crc kubenswrapper[4854]: W1125 09:38:11.663983 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc06ba819_662b_4eaa_93f8_ea5e462d5aec.slice/crio-b61f9a7ae296082368bc6b8b88f63a904fc31819de93364cc0b3c95043fe097f WatchSource:0}: Error finding container b61f9a7ae296082368bc6b8b88f63a904fc31819de93364cc0b3c95043fe097f: Status 404 returned error can't find the container with id b61f9a7ae296082368bc6b8b88f63a904fc31819de93364cc0b3c95043fe097f Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.684992 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.697249 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.704436 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.710321 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" 
(UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:11 crc kubenswrapper[4854]: W1125 09:38:11.729725 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod268f8834_ad43_4d58_965a_9edddd43ad54.slice/crio-8e2316d1e91437867939be1c9aa23cc09b58f49fbce31700393e0a14dd1acb37 WatchSource:0}: Error finding container 8e2316d1e91437867939be1c9aa23cc09b58f49fbce31700393e0a14dd1acb37: Status 404 returned error can't find the container with id 8e2316d1e91437867939be1c9aa23cc09b58f49fbce31700393e0a14dd1acb37 Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.735576 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.745712 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.751642 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.251619852 +0000 UTC m=+98.104613228 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.758006 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.797238 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-p2x8c"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.800148 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s"] Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.801611 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.810996 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.812579 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.812740 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.31271925 +0000 UTC m=+98.165712626 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.812847 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.814321 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.314307225 +0000 UTC m=+98.167300601 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.821808 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.916235 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.916505 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 09:38:12.416486924 +0000 UTC m=+98.269480300 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.916653 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:11 crc kubenswrapper[4854]: E1125 09:38:11.916993 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.416983568 +0000 UTC m=+98.269976944 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:11 crc kubenswrapper[4854]: I1125 09:38:11.993737 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6"] Nov 25 09:38:11 crc kubenswrapper[4854]: W1125 09:38:11.995039 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4854090_19c4_4d8a_9023_1f3ae3fac5d5.slice/crio-443ec71776b821c37e906512df0c22f703528e09572aa3dba66944fb613e1851 WatchSource:0}: Error finding container 443ec71776b821c37e906512df0c22f703528e09572aa3dba66944fb613e1851: Status 404 returned error can't find the container with id 443ec71776b821c37e906512df0c22f703528e09572aa3dba66944fb613e1851 Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.016931 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.017315 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.017613 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 09:38:12.517598773 +0000 UTC m=+98.370592149 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.119405 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.119853 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.619838134 +0000 UTC m=+98.472831510 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.172256 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.196143 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mz9k6"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.208290 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.220286 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.220424 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.720402017 +0000 UTC m=+98.573395393 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.220505 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.220892 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.720880841 +0000 UTC m=+98.573874217 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: W1125 09:38:12.287554 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10e58b8d_5283_40d1_9707_d844963263fe.slice/crio-88c0c539a83f58f3460e448b4965c1777dadf080b3f2ec46e5f4d7730b39908c WatchSource:0}: Error finding container 88c0c539a83f58f3460e448b4965c1777dadf080b3f2ec46e5f4d7730b39908c: Status 404 returned error can't find the container with id 88c0c539a83f58f3460e448b4965c1777dadf080b3f2ec46e5f4d7730b39908c Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.321436 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.324973 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.824951983 +0000 UTC m=+98.677945359 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.426385 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.427031 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:12.927016089 +0000 UTC m=+98.780009465 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.527793 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.527960 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.027939562 +0000 UTC m=+98.880932948 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.528047 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.528651 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.028640493 +0000 UTC m=+98.881633869 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.629557 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.629978 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.129940167 +0000 UTC m=+98.982933543 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.635305 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=2.635260438 podStartE2EDuration="2.635260438s" podCreationTimestamp="2025-11-25 09:38:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:12.632626283 +0000 UTC m=+98.485619669" watchObservedRunningTime="2025-11-25 09:38:12.635260438 +0000 UTC m=+98.488253814" Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.636329 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.637985 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.137969294 +0000 UTC m=+98.990962670 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.678021 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" event={"ID":"594e4cd2-32ab-4150-b5b3-4d167c35fb84","Type":"ContainerStarted","Data":"9a5db28104b52be07f0506ecb08dda1a14da661e858fe5f553dc268ffc5d4f1e"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.678066 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" event={"ID":"594e4cd2-32ab-4150-b5b3-4d167c35fb84","Type":"ContainerStarted","Data":"e899416a7e0f213547c19b312e90a811bfa32d1c89e5fd75e0c798fc8a931a5c"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.679051 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.680534 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vhjzg"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.680571 4854 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6rfsb container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused" start-of-body= Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.680619 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" podUID="594e4cd2-32ab-4150-b5b3-4d167c35fb84" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused" Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.683795 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-52bvk"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.696239 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" event={"ID":"7ea5d904-b5da-4a4a-9221-f808841b0052","Type":"ContainerStarted","Data":"40d03eaa09f7fc9dfaaa634461b9a2040747d3879ba2b9a4625a1c97b0923f0d"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.696281 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" event={"ID":"7ea5d904-b5da-4a4a-9221-f808841b0052","Type":"ContainerStarted","Data":"2c6b63829b21d56c72abdc68f17f1cd0aa810d6a31a0d2cd72f5bae5cfa79855"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.720730 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-vc2n9" event={"ID":"1e52d13d-15dd-437f-8d2d-88709419d1f2","Type":"ContainerStarted","Data":"107a5ad12e754b28f154bec1002e84b00fc31fb4ac35de48dc96573913731062"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.720794 4854 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-vc2n9" event={"ID":"1e52d13d-15dd-437f-8d2d-88709419d1f2","Type":"ContainerStarted","Data":"98ab21cde9e6e4b0b7ff12f1f1bea542c0e7ee3086f232c1c05fa3fec02be7d4"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.739848 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.740653 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.240634626 +0000 UTC m=+99.093628002 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.748515 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.751662 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.753831 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" event={"ID":"43b016d4-5802-4d1b-a99f-bc7728d7162f","Type":"ContainerStarted","Data":"82e772791a48fd991f831c4320171a7477f4ba871c2d072aaf0eb6d0d489d790"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.754158 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.754191 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.755487 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mvn6l"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.758919 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" event={"ID":"f34856c6-ef13-4ff0-95b4-8a9f6b88b729","Type":"ContainerStarted","Data":"2cbc589bfc42dd1e4c737e7a3c0b67448039a76de4e629575846788b61e25060"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.779212 4854 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-console-operator/console-operator-58897d9998-p2x8c" event={"ID":"a4854090-19c4-4d8a-9023-1f3ae3fac5d5","Type":"ContainerStarted","Data":"443ec71776b821c37e906512df0c22f703528e09572aa3dba66944fb613e1851"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.781032 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" event={"ID":"8d4de16b-3a82-47e6-aab5-e41ab8f871fb","Type":"ContainerStarted","Data":"4a595744a5864ee821a9d25ebf677a706dfec907fe2ce14fb3b829e3514bb217"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.841809 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" podStartSLOduration=76.841768186 podStartE2EDuration="1m16.841768186s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:12.83482349 +0000 UTC m=+98.687816866" watchObservedRunningTime="2025-11-25 09:38:12.841768186 +0000 UTC m=+98.694761562" Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.846032 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.346017276 +0000 UTC m=+99.199010642 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.847209 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" event={"ID":"eaeeed45-4bb5-446f-8afb-67ae1c8532c8","Type":"ContainerStarted","Data":"3561ec72f5b9e217b83eb9d809d30306cc66b6f9e4937ab9374d8daae34e6d06"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.847365 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.872421 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" event={"ID":"64098d81-ada3-4e0d-ac74-3d94b7247437","Type":"ContainerStarted","Data":"7a779613ea9aff72e8b351732a48ff257cc76ea2ae1d0ffaccf2f9b7c472442a"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.921337 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" 
event={"ID":"d606b975-0bde-4cfa-b190-86b7da1df764","Type":"ContainerStarted","Data":"85826be782fc1d682fb12508c3fc1999386da7480b9ab1ca83cb0ba68d96a288"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.942564 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-542js"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.948927 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" event={"ID":"9117eb3b-0187-4957-b851-da9e4c229c8f","Type":"ContainerStarted","Data":"c322352cd33e8cee1967aa81d92ed36962cac897f0592cba5f837fe4beb13f99"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.949275 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.949605 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.950170 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.45014615 +0000 UTC m=+99.303139526 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.950332 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:12 crc kubenswrapper[4854]: E1125 09:38:12.950645 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.450633344 +0000 UTC m=+99.303626720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.965717 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pxbm7"] Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.969555 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vfjfs" event={"ID":"268f8834-ad43-4d58-965a-9edddd43ad54","Type":"ContainerStarted","Data":"c7f519f3eceb53e5c0a993cd93e65332405956e9c7a43067566b2eba0156f9a6"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.969594 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vfjfs" event={"ID":"268f8834-ad43-4d58-965a-9edddd43ad54","Type":"ContainerStarted","Data":"8e2316d1e91437867939be1c9aa23cc09b58f49fbce31700393e0a14dd1acb37"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.973186 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" event={"ID":"10e58b8d-5283-40d1-9707-d844963263fe","Type":"ContainerStarted","Data":"88c0c539a83f58f3460e448b4965c1777dadf080b3f2ec46e5f4d7730b39908c"} Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.989318 4854 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-mz9k6 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Nov 25 09:38:12 crc kubenswrapper[4854]: I1125 09:38:12.989385 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.000996 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" event={"ID":"2396b594-22b4-4052-b767-54e2aaf1b0dc","Type":"ContainerStarted","Data":"cef7fce8180f5b0fb61ca145f793704da7532b37316e4875362b542c722d46d0"} Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.014416 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.051168 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.053526 4854 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.553497323 +0000 UTC m=+99.406490759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.058789 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.058819 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" event={"ID":"cc554d84-4ddd-468b-ac0f-b41f2ad5d26f","Type":"ContainerStarted","Data":"8cbefc2221e96fdf36d083bc3acd47890ff68fc4aca707b336d539247d6f044e"} Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.058837 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ln89b"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.058855 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.058865 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-kjgl8"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.061275 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.071735 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.071799 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-t99fd"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.073330 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m5689"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.075277 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-45d9l" podStartSLOduration=77.075232738 podStartE2EDuration="1m17.075232738s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.050173579 +0000 UTC m=+98.903166955" watchObservedRunningTime="2025-11-25 09:38:13.075232738 +0000 UTC m=+98.928226114" Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.077594 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-4nz5q"] Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.078494 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8"] Nov 25 
09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.080277 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5"]
Nov 25 09:38:13 crc kubenswrapper[4854]: W1125 09:38:13.112303 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b64fc1e_475a_4d69_a7ac_a23a7b5a7909.slice/crio-bb769b42fc682475464aab688dcbc2cf0e7eb3a9d4775ab5cb44d782f13d5a8c WatchSource:0}: Error finding container bb769b42fc682475464aab688dcbc2cf0e7eb3a9d4775ab5cb44d782f13d5a8c: Status 404 returned error can't find the container with id bb769b42fc682475464aab688dcbc2cf0e7eb3a9d4775ab5cb44d782f13d5a8c
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.117999 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" podStartSLOduration=77.117980456 podStartE2EDuration="1m17.117980456s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.094027638 +0000 UTC m=+98.947021014" watchObservedRunningTime="2025-11-25 09:38:13.117980456 +0000 UTC m=+98.970973832"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.123582 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" event={"ID":"c06ba819-662b-4eaa-93f8-ea5e462d5aec","Type":"ContainerStarted","Data":"4a68f1928e12d187c53630bb88825bd7a3c4398a7b4c1df63a3580ba5acfa169"}
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.123617 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" event={"ID":"c06ba819-662b-4eaa-93f8-ea5e462d5aec","Type":"ContainerStarted","Data":"b61f9a7ae296082368bc6b8b88f63a904fc31819de93364cc0b3c95043fe097f"}
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.124658 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.134847 4854 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-r5wdv container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body=
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.134893 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" podUID="c06ba819-662b-4eaa-93f8-ea5e462d5aec" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.153498 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.153819 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.653807299 +0000 UTC m=+99.506800675 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.196781 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" event={"ID":"5912e701-8be4-4f6b-94a0-6ab69f81cef5","Type":"ContainerStarted","Data":"e9d20cea1165d512f7ef8726a17e1982309583653c4cd62ef30416ea6dfbb1f5"}
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.196828 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" event={"ID":"5912e701-8be4-4f6b-94a0-6ab69f81cef5","Type":"ContainerStarted","Data":"699c7f54867ef915b0bfb8365be11ae1820a156ee992672db534eead6f494670"}
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.197894 4854 patch_prober.go:28] interesting pod/downloads-7954f5f757-jvdld container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.197934 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jvdld" podUID="a2d7193d-eb53-446a-a96c-49d28dbbe724" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.198591 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.204267 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-62dll"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.208724 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.231040 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-h5cbk"]
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.240383 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hsjfc" podStartSLOduration=78.240359307 podStartE2EDuration="1m18.240359307s" podCreationTimestamp="2025-11-25 09:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.238266097 +0000 UTC m=+99.091259473" watchObservedRunningTime="2025-11-25 09:38:13.240359307 +0000 UTC m=+99.093352683"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.257064 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.257290 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.757238394 +0000 UTC m=+99.610231770 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.257511 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.260409 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.760392743 +0000 UTC m=+99.613386119 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.363209 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.363854 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.863836448 +0000 UTC m=+99.716829824 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.412700 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-jvdld" podStartSLOduration=77.412663578 podStartE2EDuration="1m17.412663578s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.409924741 +0000 UTC m=+99.262918107" watchObservedRunningTime="2025-11-25 09:38:13.412663578 +0000 UTC m=+99.265656954"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.479889 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.480285 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:13.980270339 +0000 UTC m=+99.833263725 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.517940 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv" podStartSLOduration=76.517923385 podStartE2EDuration="1m16.517923385s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.517177123 +0000 UTC m=+99.370170499" watchObservedRunningTime="2025-11-25 09:38:13.517923385 +0000 UTC m=+99.370916781"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.565843 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fjr4q" podStartSLOduration=77.565821849 podStartE2EDuration="1m17.565821849s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.541794249 +0000 UTC m=+99.394787645" watchObservedRunningTime="2025-11-25 09:38:13.565821849 +0000 UTC m=+99.418815225"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.586477 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.587341 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.087320936 +0000 UTC m=+99.940314332 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.618709 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" podStartSLOduration=76.618687294 podStartE2EDuration="1m16.618687294s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.615863054 +0000 UTC m=+99.468856430" watchObservedRunningTime="2025-11-25 09:38:13.618687294 +0000 UTC m=+99.471680670"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.685816 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-vc2n9" podStartSLOduration=77.685793531 podStartE2EDuration="1m17.685793531s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.679237685 +0000 UTC m=+99.532231081" watchObservedRunningTime="2025-11-25 09:38:13.685793531 +0000 UTC m=+99.538786907"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.687600 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.687997 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.187984133 +0000 UTC m=+100.040977509 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.688993 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb" podStartSLOduration=76.688984271 podStartE2EDuration="1m16.688984271s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.646097238 +0000 UTC m=+99.499090614" watchObservedRunningTime="2025-11-25 09:38:13.688984271 +0000 UTC m=+99.541977647"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.727491 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-q7sg8" podStartSLOduration=76.727473709 podStartE2EDuration="1m16.727473709s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.724869926 +0000 UTC m=+99.577863302" watchObservedRunningTime="2025-11-25 09:38:13.727473709 +0000 UTC m=+99.580467085"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.764428 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.764488 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.774969 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5" podStartSLOduration=77.774949161 podStartE2EDuration="1m17.774949161s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.772468462 +0000 UTC m=+99.625461848" watchObservedRunningTime="2025-11-25 09:38:13.774949161 +0000 UTC m=+99.627942547"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.792246 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.792776 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.292756816 +0000 UTC m=+100.145750192 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.882036 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" podStartSLOduration=77.88201823 podStartE2EDuration="1m17.88201823s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.801157703 +0000 UTC m=+99.654151079" watchObservedRunningTime="2025-11-25 09:38:13.88201823 +0000 UTC m=+99.735011606"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.894247 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.894610 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.394595655 +0000 UTC m=+100.247589031 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.979154 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" podStartSLOduration=77.979135295 podStartE2EDuration="1m17.979135295s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.947234283 +0000 UTC m=+99.800227699" watchObservedRunningTime="2025-11-25 09:38:13.979135295 +0000 UTC m=+99.832128671"
Nov 25 09:38:13 crc kubenswrapper[4854]: I1125 09:38:13.996198 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:13 crc kubenswrapper[4854]: E1125 09:38:13.996579 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.496560138 +0000 UTC m=+100.349553524 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.065452 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" podStartSLOduration=78.065432015 podStartE2EDuration="1m18.065432015s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:13.991881126 +0000 UTC m=+99.844874502" watchObservedRunningTime="2025-11-25 09:38:14.065432015 +0000 UTC m=+99.918425391"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.098852 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.099134 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.599121658 +0000 UTC m=+100.452115034 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.202313 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.203011 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.702995315 +0000 UTC m=+100.555988691 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.294509 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" event={"ID":"00a6c4d0-b375-4cf1-af7f-a9a27621d050","Type":"ContainerStarted","Data":"6cb8fee34ca65b575d708e6b82402d1167b367e603a122f2cb0ab4c41e009f10"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.320533 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.320972 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.82095792 +0000 UTC m=+100.673951296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.352387 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" event={"ID":"c49abfb3-cae2-48dc-93ef-0fb8d3853caf","Type":"ContainerStarted","Data":"04ff0e303573c0becb582602dcc0b17bbd719459ebca4c258af7e568c95e3512"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.352751 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" event={"ID":"c49abfb3-cae2-48dc-93ef-0fb8d3853caf","Type":"ContainerStarted","Data":"0c443c05b240b459e7c89331f44504dded7a7ed6bd0f2bff7a5025e952eb8224"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.354857 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" event={"ID":"4293c24a-f631-41fb-9065-b157c62cd0d3","Type":"ContainerStarted","Data":"56bdc9fa94db72c5bbecba20df72a01efb55a19c5989a65e85a47994e51780a7"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.354887 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" event={"ID":"4293c24a-f631-41fb-9065-b157c62cd0d3","Type":"ContainerStarted","Data":"b7ea48fa657da2a202a2e4a21decaf176026243b5d2e57797c446f9ab6769786"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.413289 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" event={"ID":"065a1559-77c2-4340-a110-4835055501a7","Type":"ContainerStarted","Data":"2cdb47ccaa02a9cb6e5dc2f18522575895397f17411aad5d0a0d290cf15c44f9"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.413341 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" event={"ID":"065a1559-77c2-4340-a110-4835055501a7","Type":"ContainerStarted","Data":"06965e3711212d4defea32d0c2b04a6f201f0c1bb28e7724e9a0ec015d847086"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.424214 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.424764 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:14.924744184 +0000 UTC m=+100.777737560 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.431821 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" event={"ID":"10866420-6a3f-42b5-b416-fa3e70f94a20","Type":"ContainerStarted","Data":"ef245a6a36b1367a0e95726fc2642f82948679a311c90f7dc4f35eae4a814238"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.431871 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" event={"ID":"10866420-6a3f-42b5-b416-fa3e70f94a20","Type":"ContainerStarted","Data":"b491e38636ec7c5e85f59661b64b33161b63afce7e7b12a6d22fd9f129e28cb9"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.432532 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.436871 4854 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-hm6rl container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body=
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.436913 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" podUID="10866420-6a3f-42b5-b416-fa3e70f94a20" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.459489 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ln89b" podStartSLOduration=77.459472277 podStartE2EDuration="1m17.459472277s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.457660325 +0000 UTC m=+100.310653701" watchObservedRunningTime="2025-11-25 09:38:14.459472277 +0000 UTC m=+100.312465653"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.468913 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-vfjfs" podStartSLOduration=6.468882883 podStartE2EDuration="6.468882883s" podCreationTimestamp="2025-11-25 09:38:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.072263509 +0000 UTC m=+99.925256885" watchObservedRunningTime="2025-11-25 09:38:14.468882883 +0000 UTC m=+100.321876259"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.493013 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" event={"ID":"43b016d4-5802-4d1b-a99f-bc7728d7162f","Type":"ContainerStarted","Data":"832f11003fd2ae67003394b90f0da292c4fb41ca5d0eae086b2793c8d12c9061"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.502991 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" podStartSLOduration=77.502976256 podStartE2EDuration="1m17.502976256s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.502462302 +0000 UTC m=+100.355455688" watchObservedRunningTime="2025-11-25 09:38:14.502976256 +0000 UTC m=+100.355969632"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.528415 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.528904 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" event={"ID":"3519fd59-259f-49f7-875d-4080e17ffe6f","Type":"ContainerStarted","Data":"eed9ec7a5befd58cf20072103ba2684891f6dc4b3563b149a5f5b284b82296b7"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.528951 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" event={"ID":"3519fd59-259f-49f7-875d-4080e17ffe6f","Type":"ContainerStarted","Data":"3b6332a75118be25b038793950ab1a1736918e6ec4e76119a5a912e72c9803c3"}
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.529241 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.029227289 +0000 UTC m=+100.882220665 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.556841 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-kjgl8" event={"ID":"708f81b5-e3db-4346-9fd5-025ded50d4aa","Type":"ContainerStarted","Data":"97f1d83fdcfedc17d1179978a74d74c7fd8cabd96961970c7cd8974396fa6c2d"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.558734 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-nqm2j" podStartSLOduration=78.558718883 podStartE2EDuration="1m18.558718883s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.558417494 +0000 UTC m=+100.411410870" watchObservedRunningTime="2025-11-25 09:38:14.558718883 +0000 UTC m=+100.411712259"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.588166 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" event={"ID":"b5254b1e-3543-4304-b361-d8419d09548e","Type":"ContainerStarted","Data":"01dc3cb64d76378bec77cef03449b050a50785e80845dd414f731963f25d8ed4"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.588216 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" event={"ID":"b5254b1e-3543-4304-b361-d8419d09548e","Type":"ContainerStarted","Data":"3b0885e0da46c5b3f1f8ef5114ed7dfc11e6b4e1bfcf51d18c6b99731da05171"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.595420 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-kjgl8" podStartSLOduration=6.59540373 podStartE2EDuration="6.59540373s" podCreationTimestamp="2025-11-25 09:38:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.592386285 +0000 UTC m=+100.445379661" watchObservedRunningTime="2025-11-25 09:38:14.59540373 +0000 UTC m=+100.448397106"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.633808 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" event={"ID":"5912e701-8be4-4f6b-94a0-6ab69f81cef5","Type":"ContainerStarted","Data":"2d8d9ce31f2b74339aa87f6ebecce76ec0c786a3dd83ff3ba38de85930efdfd7"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.636187 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.639003 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.138982993 +0000 UTC m=+100.991976369 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.641784 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" event={"ID":"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909","Type":"ContainerStarted","Data":"bb769b42fc682475464aab688dcbc2cf0e7eb3a9d4775ab5cb44d782f13d5a8c"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.646462 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" event={"ID":"eaeeed45-4bb5-446f-8afb-67ae1c8532c8","Type":"ContainerStarted","Data":"3f9aeeb524f1340b32a20ad47c5f9bb14be5940fcd5c35035e46c6d484157721"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.659910 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" event={"ID":"24be6029-f079-49a0-904e-6f072eafba7e","Type":"ContainerStarted","Data":"aa36c1f64a733fc0a9ef6616a456d9f06ddeac2c655950967518c690230d028c"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.669473 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-krv84" podStartSLOduration=77.666854141 podStartE2EDuration="1m17.666854141s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.666381197 +0000 UTC m=+100.519374593" watchObservedRunningTime="2025-11-25 09:38:14.666854141 +0000 UTC m=+100.519847517"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.676342 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-mvn6l" podStartSLOduration=78.676322188 podStartE2EDuration="1m18.676322188s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.633430356 +0000 UTC m=+100.486423732" watchObservedRunningTime="2025-11-25 09:38:14.676322188 +0000 UTC m=+100.529315564"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.679833 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" event={"ID":"b236ed1d-4b1e-4910-9df1-0db7353a28c5","Type":"ContainerStarted","Data":"0b7fb970b0ce13084068249f94713bb298583813521c6522fe6295ac620cc19e"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.693938 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" podStartSLOduration=77.693919785 podStartE2EDuration="1m17.693919785s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.692468265 +0000 UTC m=+100.545461641" watchObservedRunningTime="2025-11-25 09:38:14.693919785 +0000 UTC m=+100.546913161"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.696820 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rlq4k" event={"ID":"10e58b8d-5283-40d1-9707-d844963263fe","Type":"ContainerStarted","Data":"3fafb62c6c0cae65a51f8301aa3c8c7f997402fcded267ffbdc4d7a84858fa53"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.728329 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" event={"ID":"9117eb3b-0187-4957-b851-da9e4c229c8f","Type":"ContainerStarted","Data":"7c5ab223884486e83e8b38ca08cd3fd9f322a85ea0a85aa06a26c4a5c089a915"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.729096 4854 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-mz9k6 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body=
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.729139 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.731943 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2kdq6" podStartSLOduration=78.73192888 podStartE2EDuration="1m18.73192888s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.729718368 +0000 UTC m=+100.582711744" watchObservedRunningTime="2025-11-25 09:38:14.73192888 +0000 UTC m=+100.584922256"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.750438 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.752120 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.252103851 +0000 UTC m=+101.105097277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.753177 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:38:14 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld
Nov 25 09:38:14 crc kubenswrapper[4854]: [+]process-running ok
Nov 25 09:38:14 crc kubenswrapper[4854]: healthz check failed
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.753230 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.823403 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" event={"ID":"1a9dfec2-b952-4e84-9d99-377792feb851","Type":"ContainerStarted","Data":"f129035c2358a5e4bd66fe8198f8a2ec93d3036cd6cb121bf886ae665e188aea"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.823446 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" event={"ID":"1a9dfec2-b952-4e84-9d99-377792feb851","Type":"ContainerStarted","Data":"671cdde75ab6839da189701a85029cbdfdc4da0fc64de423926b3b17f8c826bc"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.834307 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-p2x8c" event={"ID":"a4854090-19c4-4d8a-9023-1f3ae3fac5d5","Type":"ContainerStarted","Data":"735966490a549cda35bc0ae4255b6fec20d1b15a1bd6f10c0b48eb9237f69713"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.835270 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-p2x8c"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.836224 4854 patch_prober.go:28] interesting pod/console-operator-58897d9998-p2x8c container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.39:8443/readyz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body=
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.836262 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-p2x8c" podUID="a4854090-19c4-4d8a-9023-1f3ae3fac5d5" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.39:8443/readyz\": dial tcp 10.217.0.39:8443: connect: connection refused"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.842547 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" event={"ID":"243c3e75-c67a-4dcf-b76d-bc1920af0a41","Type":"ContainerStarted","Data":"82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.842590 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" event={"ID":"243c3e75-c67a-4dcf-b76d-bc1920af0a41","Type":"ContainerStarted","Data":"2d8accc69ff98beffee524c86389d9d2d757d6197352176385fc75a24f23aa2a"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.843054 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.848764 4854 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-6qzzx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body=
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.848825 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" podUID="243c3e75-c67a-4dcf-b76d-bc1920af0a41" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.848774 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" event={"ID":"dc1611b6-bd24-4ae5-a332-3a336786b9d5","Type":"ContainerStarted","Data":"150883b4dd35e727e733905d2893ce663fc1a8a287bc5ea25ebc197896335faf"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.848899 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" event={"ID":"dc1611b6-bd24-4ae5-a332-3a336786b9d5","Type":"ContainerStarted","Data":"ceb63bcb05e2217ac83abb393a5d3a09d3b0415f7b9a8c1e568de88d7ccc38c9"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.851093 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.852556 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.35252348 +0000 UTC m=+101.205516856 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.861347 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" podStartSLOduration=77.861325529 podStartE2EDuration="1m17.861325529s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.779946048 +0000 UTC m=+100.632939424" watchObservedRunningTime="2025-11-25 09:38:14.861325529 +0000 UTC m=+100.714318905"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.864828 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" event={"ID":"30a1a0d5-62ce-4910-a239-58d552e12e59","Type":"ContainerStarted","Data":"71e6ebde45e4600f303f71dcddaac152a9458255e7870080e53f86e5b4ae7f49"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.864868 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" event={"ID":"30a1a0d5-62ce-4910-a239-58d552e12e59","Type":"ContainerStarted","Data":"b42436a6a7dcc5d54c7b77956a1508f689f390f272b1616ecbba7f641157fd9c"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.895611 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bdb45" event={"ID":"64098d81-ada3-4e0d-ac74-3d94b7247437","Type":"ContainerStarted","Data":"c1f23d5d7ad3b3ba9a09b906d0c4ec2b093bb40e1f3b1fa1e7e5c440d96c5406"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.907829 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" podStartSLOduration=78.907812084 podStartE2EDuration="1m18.907812084s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.857947474 +0000 UTC m=+100.710940850" watchObservedRunningTime="2025-11-25 09:38:14.907812084 +0000 UTC m=+100.760805460"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.909306 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5689" event={"ID":"97e545f8-81c2-400b-a339-b2b3a1958492","Type":"ContainerStarted","Data":"69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.909342 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5689" event={"ID":"97e545f8-81c2-400b-a339-b2b3a1958492","Type":"ContainerStarted","Data":"4194399735ad3d15c6809261046f71ab943d9557f41bc896de4c2e1fb13ab91e"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.952350 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:14 crc kubenswrapper[4854]: E1125 09:38:14.955557 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.455539333 +0000 UTC m=+101.308532789 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.960954 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4nz5q" event={"ID":"d22cd363-91c5-46e0-af39-0f08466fbc92","Type":"ContainerStarted","Data":"2119cc4e5bd54a61ae1269ad44bb241444a2a286bcd2cc25bec40632b69ff018"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.981756 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" podStartSLOduration=77.981740174 podStartE2EDuration="1m17.981740174s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.981060145 +0000 UTC m=+100.834053521" watchObservedRunningTime="2025-11-25 09:38:14.981740174 +0000 UTC m=+100.834733550"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.983204 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-p2x8c" podStartSLOduration=78.983197315 podStartE2EDuration="1m18.983197315s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:14.909022717 +0000 UTC m=+100.762016093" watchObservedRunningTime="2025-11-25 09:38:14.983197315 +0000 UTC m=+100.836190691"
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.985070 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" event={"ID":"8d4de16b-3a82-47e6-aab5-e41ab8f871fb","Type":"ContainerStarted","Data":"0eeae13f8762fcb0e59e1f0c41f696c23c177139bdded84b322ec7a572236ff7"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.986749 4854 generic.go:334] "Generic (PLEG): container finished" podID="f34856c6-ef13-4ff0-95b4-8a9f6b88b729" containerID="64ed0e3dda8f60161fa42e65f6189be9ea1fcab8092ff394ac993d94a3511a8c" exitCode=0
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.986793 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" event={"ID":"f34856c6-ef13-4ff0-95b4-8a9f6b88b729","Type":"ContainerDied","Data":"64ed0e3dda8f60161fa42e65f6189be9ea1fcab8092ff394ac993d94a3511a8c"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.999766 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" event={"ID":"c464b1b6-988b-430b-b6ac-6b5110888de8","Type":"ContainerStarted","Data":"522567f64ae7dd724d4feb8aebe37cb5b8d3db80b72ddf911eb14ed8684daf1c"}
Nov 25 09:38:14 crc kubenswrapper[4854]: I1125 09:38:14.999805 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" event={"ID":"c464b1b6-988b-430b-b6ac-6b5110888de8","Type":"ContainerStarted","Data":"89f357135bd2e31f0937643e688c46fb7282386f855dbd15be8da2b6d4568c87"}
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.021320 4854 patch_prober.go:28] interesting pod/downloads-7954f5f757-jvdld container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.021363 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jvdld" podUID="a2d7193d-eb53-446a-a96c-49d28dbbe724" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.043912 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-m5689" podStartSLOduration=79.043895511 podStartE2EDuration="1m19.043895511s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:15.040962598 +0000 UTC m=+100.893955974" watchObservedRunningTime="2025-11-25 09:38:15.043895511 +0000 UTC m=+100.896888887"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.046024 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" event={"ID":"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b","Type":"ContainerStarted","Data":"5e97bcd3115c0d3cd9955dd33f6f83b55171978d563a6fc66627f71192bfb47e"}
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.046052 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" event={"ID":"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b","Type":"ContainerStarted","Data":"198f501913c0a96c89685af906eaf75c455589517877907abeb38998a5920449"}
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.053385 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.053852 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cbzc5"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.053928 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.054406 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.554384828 +0000 UTC m=+101.407378204 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.055862 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r5wdv"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.065749 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/377b0f2c-4152-40db-a4c1-be3126061d7e-metrics-certs\") pod \"network-metrics-daemon-rbb99\" (UID: \"377b0f2c-4152-40db-a4c1-be3126061d7e\") " pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.127053 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" podStartSLOduration=78.127039492 podStartE2EDuration="1m18.127039492s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:15.085555899 +0000 UTC m=+100.938549275" watchObservedRunningTime="2025-11-25 09:38:15.127039492 +0000 UTC m=+100.980032868"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.149907 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rbb99"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.156311 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.161219 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.661196748 +0000 UTC m=+101.514190124 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.196815 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6rfsb"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.222458 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" podStartSLOduration=79.222426679 podStartE2EDuration="1m19.222426679s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:15.170257114 +0000 UTC m=+101.023250480" watchObservedRunningTime="2025-11-25 09:38:15.222426679 +0000 UTC m=+101.075420055"
Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.258209 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.258503 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.758488589 +0000 UTC m=+101.611481965 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.299838 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" podStartSLOduration=78.299820238 podStartE2EDuration="1m18.299820238s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:15.262758699 +0000 UTC m=+101.115752095" watchObservedRunningTime="2025-11-25 09:38:15.299820238 +0000 UTC m=+101.152813634" Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.325445 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-cd58s" podStartSLOduration=78.325422242 podStartE2EDuration="1m18.325422242s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:15.325204545 +0000 UTC m=+101.178197931" watchObservedRunningTime="2025-11-25 09:38:15.325422242 +0000 UTC m=+101.178415618" Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.363315 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.363743 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.863728305 +0000 UTC m=+101.716721681 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.465389 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.466046 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:15.966027907 +0000 UTC m=+101.819021283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.505423 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.505738 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.508763 4854 patch_prober.go:28] interesting pod/apiserver-76f77b778f-sb8md container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.508801 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" podUID="1a9dfec2-b952-4e84-9d99-377792feb851" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.568113 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.568570 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:38:16.068559466 +0000 UTC m=+101.921552842 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.669484 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.670155 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.170140188 +0000 UTC m=+102.023133564 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.755793 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:38:15 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld Nov 25 09:38:15 crc kubenswrapper[4854]: [+]process-running ok Nov 25 09:38:15 crc kubenswrapper[4854]: healthz check failed Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.755854 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.776359 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.776650 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.27663794 +0000 UTC m=+102.129631316 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.890320 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.890745 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.390729226 +0000 UTC m=+102.243722602 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.894275 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-rbb99"] Nov 25 09:38:15 crc kubenswrapper[4854]: I1125 09:38:15.992578 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:15 crc kubenswrapper[4854]: E1125 09:38:15.992944 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.492912745 +0000 UTC m=+102.345906121 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.024826 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rbb99" event={"ID":"377b0f2c-4152-40db-a4c1-be3126061d7e","Type":"ContainerStarted","Data":"24cedac1c3eacd9cfcbef220582f2991323059387899d5d9c9d12cd351592ce3"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.025742 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" event={"ID":"00a6c4d0-b375-4cf1-af7f-a9a27621d050","Type":"ContainerStarted","Data":"e459b6082c5076dda9616d6078c478829d16a62a404cc4776b885a205e78c2bd"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.027238 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-chcz8" event={"ID":"b236ed1d-4b1e-4910-9df1-0db7353a28c5","Type":"ContainerStarted","Data":"6057c6f16e1af73ae68c841fe25c45d138e970af973854464f0be16cd81187c2"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.028724 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" event={"ID":"3519fd59-259f-49f7-875d-4080e17ffe6f","Type":"ContainerStarted","Data":"910a77e0adcca875eb6626c7ecb5b6500acc92eff0d2ff7034771be535c53807"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.030827 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4nzhc" event={"ID":"eb03f4b9-cf07-48bd-98a6-f0b9dfdc4e7b","Type":"ContainerStarted","Data":"ba7bf4b10044bedda72805325de87e7f01d75b8fc3e9ad3cd25a90dfe5eff183"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.032738 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" event={"ID":"c49abfb3-cae2-48dc-93ef-0fb8d3853caf","Type":"ContainerStarted","Data":"a6f3599a165601552636d9ee05e5242a945945b36a9f66bd2ee16b40930d2d51"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.034281 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" event={"ID":"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909","Type":"ContainerStarted","Data":"718995d6fd0d6f356e13018702800bbac7fef1f5d80c31c6957a6f77f138dc52"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.035623 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4nz5q" event={"ID":"d22cd363-91c5-46e0-af39-0f08466fbc92","Type":"ContainerStarted","Data":"2b1e5a8c795b19f5f5ba6ac7ebe551d7c9082ca0d020249149a01f8dbad587aa"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.035645 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4nz5q" event={"ID":"d22cd363-91c5-46e0-af39-0f08466fbc92","Type":"ContainerStarted","Data":"22eb80f905ca5137ddde387009e4c177456cb40852ca90f761ca8ac1229fe3c5"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.036095 4854 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.037492 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vhjzg" event={"ID":"c464b1b6-988b-430b-b6ac-6b5110888de8","Type":"ContainerStarted","Data":"b0c793c8eb91c960dc6a555d9b2abad5c9f073e7578ac251abeb5241eaf3ef22"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.039234 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-h5cbk" event={"ID":"24be6029-f079-49a0-904e-6f072eafba7e","Type":"ContainerStarted","Data":"fc5ecede7e142570ff74b57bbbb9577f0d1a8206fb07621b65d4dd2cf9e08e6f"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.040822 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" event={"ID":"dc1611b6-bd24-4ae5-a332-3a336786b9d5","Type":"ContainerStarted","Data":"002bdb00a4256b78d4829ada8cb72a852562a8512a76739e0485f58d203e4da5"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.043089 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" event={"ID":"f34856c6-ef13-4ff0-95b4-8a9f6b88b729","Type":"ContainerStarted","Data":"d5a71514787b67befdf3ff68bffe589159402f9596f1c25c6754c5daf01a97c2"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.044608 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-kjgl8" event={"ID":"708f81b5-e3db-4346-9fd5-025ded50d4aa","Type":"ContainerStarted","Data":"76dd4eea18476383be6fb49787c3bdb82a1b1ecdb6a6e09dbdba604099578259"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.046285 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" event={"ID":"4293c24a-f631-41fb-9065-b157c62cd0d3","Type":"ContainerStarted","Data":"ff7873486eff6226d3c737272f803914e9b519d9a351b725a0f3b9a9d8300243"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.046458 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.048550 4854 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-mz9k6 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body= Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.048583 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.049271 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-542js" event={"ID":"30a1a0d5-62ce-4910-a239-58d552e12e59","Type":"ContainerStarted","Data":"aa9c29cc51013437911ea7c7f7cde516ac54b61b484dccd5351fe072a7d9e0b3"} Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.053005 4854 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-hm6rl container/catalog-operator 
namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.053043 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" podUID="10866420-6a3f-42b5-b416-fa3e70f94a20" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.086638 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-p2x8c" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.093221 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.093345 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.593309813 +0000 UTC m=+102.446303189 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.093451 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.093800 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.593791967 +0000 UTC m=+102.446785343 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.096183 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-t99fd" podStartSLOduration=80.096170524 podStartE2EDuration="1m20.096170524s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:16.077254719 +0000 UTC m=+101.930248105" watchObservedRunningTime="2025-11-25 09:38:16.096170524 +0000 UTC m=+101.949163900" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.096942 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-52bvk" podStartSLOduration=80.096937146 podStartE2EDuration="1m20.096937146s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:16.096308588 +0000 UTC m=+101.949301964" watchObservedRunningTime="2025-11-25 09:38:16.096937146 +0000 UTC m=+101.949930522" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.098953 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.183683 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-4nz5q" podStartSLOduration=8.183655768 podStartE2EDuration="8.183655768s" podCreationTimestamp="2025-11-25 09:38:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:16.182879806 +0000 UTC m=+102.035873202" watchObservedRunningTime="2025-11-25 09:38:16.183655768 +0000 UTC m=+102.036649144" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.184324 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" podStartSLOduration=79.184319096 podStartE2EDuration="1m19.184319096s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:16.156370386 +0000 UTC m=+102.009363782" watchObservedRunningTime="2025-11-25 09:38:16.184319096 +0000 UTC m=+102.037312472" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.194398 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.198987 4854 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.698971941 +0000 UTC m=+102.551965317 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.286903 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8q9m5" podStartSLOduration=79.286885977 podStartE2EDuration="1m19.286885977s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:16.286167387 +0000 UTC m=+102.139160773" watchObservedRunningTime="2025-11-25 09:38:16.286885977 +0000 UTC m=+102.139879353" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.288560 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" podStartSLOduration=79.288547864 podStartE2EDuration="1m19.288547864s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:16.236046529 +0000 UTC m=+102.089039905" watchObservedRunningTime="2025-11-25 09:38:16.288547864 +0000 UTC m=+102.141541240" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.293778 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.302630 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.302925 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.80291329 +0000 UTC m=+102.655906666 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.303091 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.305793 4854 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-5hvjb container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.12:8443/livez\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.305852 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" podUID="f34856c6-ef13-4ff0-95b4-8a9f6b88b729" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.12:8443/livez\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.379584 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-452s8" podStartSLOduration=79.379567187 podStartE2EDuration="1m19.379567187s" podCreationTimestamp="2025-11-25 09:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:16.33402143 +0000 UTC m=+102.187014806" watchObservedRunningTime="2025-11-25 09:38:16.379567187 +0000 UTC m=+102.232560563" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.404174 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.404923 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:16.904906944 +0000 UTC m=+102.757900320 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.506002 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.506415 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.006402523 +0000 UTC m=+102.859395899 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.607302 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.607419 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.107398349 +0000 UTC m=+102.960391725 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.607555 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.607907 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.107896554 +0000 UTC m=+102.960889930 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.709095 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.709489 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.209473136 +0000 UTC m=+103.062466512 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.751855 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:38:16 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld Nov 25 09:38:16 crc kubenswrapper[4854]: [+]process-running ok Nov 25 09:38:16 crc kubenswrapper[4854]: healthz check failed Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.751911 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.810223 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.810600 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.310586094 +0000 UTC m=+103.163579470 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:16 crc kubenswrapper[4854]: I1125 09:38:16.911345 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:16 crc kubenswrapper[4854]: E1125 09:38:16.911793 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.411774395 +0000 UTC m=+103.264767781 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.012978 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.013221 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.513210383 +0000 UTC m=+103.366203759 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.055371 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rbb99" event={"ID":"377b0f2c-4152-40db-a4c1-be3126061d7e","Type":"ContainerStarted","Data":"31688ead7272f37448bcc1132d61d26f04bf47d986248c96a87bf2dfd49740d0"} Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.077265 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hm6rl" Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.114585 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.115136 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.615099555 +0000 UTC m=+103.468092941 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.115543 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.117522 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.617507483 +0000 UTC m=+103.470500859 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.218031 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.218372 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.718351124 +0000 UTC m=+103.571344510 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.219891 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.223414 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.723404646 +0000 UTC m=+103.576398112 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.321688 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.321857 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.82183112 +0000 UTC m=+103.674824496 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.321952 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.322321 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.822313074 +0000 UTC m=+103.675306450 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.423348 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.423515 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.923490394 +0000 UTC m=+103.776483780 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.423629 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.423939 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:17.923931187 +0000 UTC m=+103.776924563 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.545727 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.545890 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.045862164 +0000 UTC m=+103.898855540 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.546223 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.546543 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.046534853 +0000 UTC m=+103.899528229 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.647241 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.647425 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.147398475 +0000 UTC m=+104.000391841 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.647598 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.647979 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.147967071 +0000 UTC m=+104.000960447 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.748832 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.748963 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.248939956 +0000 UTC m=+104.101933332 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.749184 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.749498 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.249489991 +0000 UTC m=+104.102483367 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.750074 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:38:17 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld Nov 25 09:38:17 crc kubenswrapper[4854]: [+]process-running ok Nov 25 09:38:17 crc kubenswrapper[4854]: healthz check failed Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.750125 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.850409 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.850595 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.350566279 +0000 UTC m=+104.203559655 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.850683 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.851057 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.351044473 +0000 UTC m=+104.204037849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.952280 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.952426 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.452392819 +0000 UTC m=+104.305386195 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:17 crc kubenswrapper[4854]: I1125 09:38:17.952473 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:17 crc kubenswrapper[4854]: E1125 09:38:17.952810 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.45279904 +0000 UTC m=+104.305792416 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.054040 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.054232 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.554208398 +0000 UTC m=+104.407201774 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.054353 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.054678 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.5546581 +0000 UTC m=+104.407651476 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.064284 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rbb99" event={"ID":"377b0f2c-4152-40db-a4c1-be3126061d7e","Type":"ContainerStarted","Data":"d129e77477ffa79b5a5a82de54d81a3459f11845e7afe5b5eed7bcf3c3322460"} Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.088034 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-rbb99" podStartSLOduration=83.088012833 podStartE2EDuration="1m23.088012833s" podCreationTimestamp="2025-11-25 09:36:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:18.086904242 +0000 UTC m=+103.939897638" watchObservedRunningTime="2025-11-25 09:38:18.088012833 +0000 UTC m=+103.941006209" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.156015 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.156252 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.656223452 +0000 UTC m=+104.509216838 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.156383 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.157852 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.657836418 +0000 UTC m=+104.510829794 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.258287 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.258444 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.758424532 +0000 UTC m=+104.611417918 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.258494 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.258863 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.758853854 +0000 UTC m=+104.611847230 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.276327 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.277053 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.284963 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.285630 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.287757 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.359191 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.359486 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.859455148 +0000 UTC m=+104.712448524 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.359621 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.359705 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.359842 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.360043 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.860026284 +0000 UTC m=+104.713019660 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.460505 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.460766 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.460831 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.460958 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.461301 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:18.961261157 +0000 UTC m=+104.814254533 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.485222 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.561874 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.562283 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.062266413 +0000 UTC m=+104.915259859 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.595811 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.662741 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.662942 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.162903918 +0000 UTC m=+105.015897294 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.663072 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.663481 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.163469724 +0000 UTC m=+105.016463100 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.748850 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-64qr8"] Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.749982 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.761862 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:38:18 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld Nov 25 09:38:18 crc kubenswrapper[4854]: [+]process-running ok Nov 25 09:38:18 crc kubenswrapper[4854]: healthz check failed Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.761903 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.762045 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.762376 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64qr8"] Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.763902 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.764023 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.264006557 +0000 UTC m=+105.116999933 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.767877 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.768325 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.268313469 +0000 UTC m=+105.121306855 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.868643 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.868807 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.36878921 +0000 UTC m=+105.221782586 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.868892 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-utilities\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.868923 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-catalog-content\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.868960 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.869041 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wvsk\" (UniqueName: \"kubernetes.io/projected/b66bda32-eed5-4ea0-b10c-065038dce52d-kube-api-access-5wvsk\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.869214 4854 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.369205062 +0000 UTC m=+105.222198438 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.925617 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zznfv"] Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.927006 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.929935 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.940269 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zznfv"] Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.993317 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.993396 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.493378765 +0000 UTC m=+105.346372141 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.993805 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-utilities\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.993860 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-utilities\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.993903 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-catalog-content\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.993947 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.994005 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-catalog-content\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.994084 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wvsk\" (UniqueName: \"kubernetes.io/projected/b66bda32-eed5-4ea0-b10c-065038dce52d-kube-api-access-5wvsk\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: E1125 09:38:18.994273 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.494262861 +0000 UTC m=+105.347256237 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.994582 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5ml9\" (UniqueName: \"kubernetes.io/projected/621fc295-6eae-4091-950e-c883d64bf7b8-kube-api-access-g5ml9\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.994635 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-utilities\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:18 crc kubenswrapper[4854]: I1125 09:38:18.994665 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-catalog-content\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.014079 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wvsk\" (UniqueName: \"kubernetes.io/projected/b66bda32-eed5-4ea0-b10c-065038dce52d-kube-api-access-5wvsk\") pod \"certified-operators-64qr8\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.056059 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.070116 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" event={"ID":"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909","Type":"ContainerStarted","Data":"0465279ebc7db11b920d8a4e38294a4ad27fac78e498014ee2e2e24d0139dfb9"} Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.072251 4854 generic.go:334] "Generic (PLEG): container finished" podID="7ea5d904-b5da-4a4a-9221-f808841b0052" containerID="40d03eaa09f7fc9dfaaa634461b9a2040747d3879ba2b9a4625a1c97b0923f0d" exitCode=0 Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.072335 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" event={"ID":"7ea5d904-b5da-4a4a-9221-f808841b0052","Type":"ContainerDied","Data":"40d03eaa09f7fc9dfaaa634461b9a2040747d3879ba2b9a4625a1c97b0923f0d"} Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.095600 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 
09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.095762 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.595741488 +0000 UTC m=+105.448734864 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.096732 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5ml9\" (UniqueName: \"kubernetes.io/projected/621fc295-6eae-4091-950e-c883d64bf7b8-kube-api-access-g5ml9\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.096800 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-utilities\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.096848 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.096875 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-catalog-content\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.097398 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-utilities\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.097407 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.597393004 +0000 UTC m=+105.450386380 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.098758 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-catalog-content\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.113724 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5ml9\" (UniqueName: \"kubernetes.io/projected/621fc295-6eae-4091-950e-c883d64bf7b8-kube-api-access-g5ml9\") pod \"community-operators-zznfv\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " pod="openshift-marketplace/community-operators-zznfv"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.117013 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-64qr8"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.138761 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nkb9k"]
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.139798 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.188986 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nkb9k"]
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.201332 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.201501 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-catalog-content\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.201538 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-utilities\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.201568 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmnhp\" (UniqueName: \"kubernetes.io/projected/e66197a5-5610-4f03-bfb3-4952c7d530e2-kube-api-access-fmnhp\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.201702 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.701687633 +0000 UTC m=+105.554681009 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.303508 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-catalog-content\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.303789 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-utilities\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.303957 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmnhp\" (UniqueName: \"kubernetes.io/projected/e66197a5-5610-4f03-bfb3-4952c7d530e2-kube-api-access-fmnhp\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.304053 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.304489 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.804428347 +0000 UTC m=+105.657421723 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.307132 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-utilities\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.307162 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-catalog-content\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.330159 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4xq88"]
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.332402 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmnhp\" (UniqueName: \"kubernetes.io/projected/e66197a5-5610-4f03-bfb3-4952c7d530e2-kube-api-access-fmnhp\") pod \"certified-operators-nkb9k\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.332910 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.333506 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zznfv"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.342505 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4xq88"]
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.346105 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64qr8"]
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.352443 4854 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 25 09:38:19 crc kubenswrapper[4854]: W1125 09:38:19.376983 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb66bda32_eed5_4ea0_b10c_065038dce52d.slice/crio-fbb5a809844fe20e8f03fed27fc5e0a69d9554ff63cf16e062348cc506a638e2 WatchSource:0}: Error finding container fbb5a809844fe20e8f03fed27fc5e0a69d9554ff63cf16e062348cc506a638e2: Status 404 returned error can't find the container with id fbb5a809844fe20e8f03fed27fc5e0a69d9554ff63cf16e062348cc506a638e2
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.408024 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.408415 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds4rn\" (UniqueName: \"kubernetes.io/projected/3066e5f3-b1f4-4415-8a74-32f39d2f8926-kube-api-access-ds4rn\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.408444 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-utilities\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.408475 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-catalog-content\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.408685 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:19.908650734 +0000 UTC m=+105.761644100 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.456772 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nkb9k"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.510467 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.510538 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds4rn\" (UniqueName: \"kubernetes.io/projected/3066e5f3-b1f4-4415-8a74-32f39d2f8926-kube-api-access-ds4rn\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.510560 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-utilities\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.511003 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-catalog-content\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.511598 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-utilities\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.511623 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-catalog-content\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.512059 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.012045928 +0000 UTC m=+105.865039304 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.530162 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds4rn\" (UniqueName: \"kubernetes.io/projected/3066e5f3-b1f4-4415-8a74-32f39d2f8926-kube-api-access-ds4rn\") pod \"community-operators-4xq88\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.613417 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.613686 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.113639911 +0000 UTC m=+105.966633287 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.613831 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.614176 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.114163695 +0000 UTC m=+105.967157071 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
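The nestedpendingoperations errors above all share one root cause: the image-registry pod's PVC is backed by the kubevirt.io.hostpath-provisioner CSI driver, which has not yet registered with this kubelet, so every MountDevice and TearDown attempt fails fast and is re-queued with a delay ("durationBeforeRetry 500ms", "No retries permitted until ..."). A minimal Go sketch of that retry policy; only the 500ms initial delay is confirmed by this log, while the doubling factor and the 2m2s cap are assumptions modeled on upstream kubelet defaults:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Assumed defaults in the style of the kubelet's exponential backoff
    // for failed volume operations; only initialDelay is visible above.
    const (
    	initialDelay = 500 * time.Millisecond
    	maxDelay     = 2*time.Minute + 2*time.Second
    )

    // nextDelay returns the wait before the next retry, doubling the
    // previous delay and clamping at maxDelay.
    func nextDelay(cur time.Duration) time.Duration {
    	if cur == 0 {
    		return initialDelay
    	}
    	if cur *= 2; cur > maxDelay {
    		return maxDelay
    	}
    	return cur
    }

    func main() {
    	var d time.Duration
    	for attempt := 1; attempt <= 5; attempt++ {
    		d = nextDelay(d)
    		fmt.Printf("attempt %d: retry no earlier than now + %v\n", attempt, d)
    	}
    }

Under this policy a transient failure like the unregistered driver above costs at most a few hundred milliseconds once the driver appears, which is exactly what the timestamps in the surrounding records show.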
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.629974 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.674071 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4xq88"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.715547 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.716608 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.216589381 +0000 UTC m=+106.069582757 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.743960 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zznfv"]
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.751350 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:38:19 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld
Nov 25 09:38:19 crc kubenswrapper[4854]: [+]process-running ok
Nov 25 09:38:19 crc kubenswrapper[4854]: healthz check failed
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.751395 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.792790 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nkb9k"]
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.818143 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.818544 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.318526744 +0000 UTC m=+106.171520120 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.919570 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.919700 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.419655443 +0000 UTC m=+106.272648829 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:19 crc kubenswrapper[4854]: I1125 09:38:19.920128 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:19 crc kubenswrapper[4854]: E1125 09:38:19.920527 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.420509207 +0000 UTC m=+106.273502583 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.021087 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:20 crc kubenswrapper[4854]: E1125 09:38:20.021248 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.521222275 +0000 UTC m=+106.374215651 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.021332 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:20 crc kubenswrapper[4854]: E1125 09:38:20.021718 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.521703288 +0000 UTC m=+106.374696654 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.082201 4854 generic.go:334] "Generic (PLEG): container finished" podID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerID="4f2a0f5b5eb6f111c16b3cf73a4306e6763b608263b1a9a10ed59d9ec27ada60" exitCode=0
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.082282 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64qr8" event={"ID":"b66bda32-eed5-4ea0-b10c-065038dce52d","Type":"ContainerDied","Data":"4f2a0f5b5eb6f111c16b3cf73a4306e6763b608263b1a9a10ed59d9ec27ada60"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.082314 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64qr8" event={"ID":"b66bda32-eed5-4ea0-b10c-065038dce52d","Type":"ContainerStarted","Data":"fbb5a809844fe20e8f03fed27fc5e0a69d9554ff63cf16e062348cc506a638e2"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.084744 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.084951 4854 generic.go:334] "Generic (PLEG): container finished" podID="0edb61e7-9266-4c0b-a53b-7a50c7e80a9c" containerID="8735c04485034526ad32a4d4d7f46be2a770ac9e970e773ea8af0f6ba4d4f939" exitCode=0
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.085083 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c","Type":"ContainerDied","Data":"8735c04485034526ad32a4d4d7f46be2a770ac9e970e773ea8af0f6ba4d4f939"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.085112 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c","Type":"ContainerStarted","Data":"4b37ea32986d4cbb0a24913cbd5311d1fb43cfc8f4089d11682513ac930fe4fb"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.087853 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkb9k" event={"ID":"e66197a5-5610-4f03-bfb3-4952c7d530e2","Type":"ContainerStarted","Data":"b04699a09c4024f37f92a70f56d520dc9d0464cb1d6898bfbbb41da44b174103"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.093750 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" event={"ID":"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909","Type":"ContainerStarted","Data":"d5873ddd135e6bb3dc05c80ff5ac34bee1b065949b9cfc6cd7a7aacdadf53d70"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.097077 4854 generic.go:334] "Generic (PLEG): container finished" podID="621fc295-6eae-4091-950e-c883d64bf7b8" containerID="81793df50c17ade2916ec80918d9f94906ab2a5028c2b2dc972eb54e88b88f2f" exitCode=0
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.097846 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zznfv" event={"ID":"621fc295-6eae-4091-950e-c883d64bf7b8","Type":"ContainerDied","Data":"81793df50c17ade2916ec80918d9f94906ab2a5028c2b2dc972eb54e88b88f2f"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.097869 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zznfv" event={"ID":"621fc295-6eae-4091-950e-c883d64bf7b8","Type":"ContainerStarted","Data":"d3dd3d5e8e1e01589fa860b9d0d46d9d8926ead1fbbe56b9bb5b173e297c3542"}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.122422 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:20 crc kubenswrapper[4854]: E1125 09:38:20.122826 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.622808247 +0000 UTC m=+106.475801633 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.123141 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:20 crc kubenswrapper[4854]: E1125 09:38:20.123942 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:38:20.623933499 +0000 UTC m=+106.476926875 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sd29s" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.132503 4854 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T09:38:19.352471605Z","Handler":null,"Name":""}
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.135511 4854 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.135544 4854 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.218942 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4xq88"]
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.224897 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.236470 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.325320 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z"
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.325789 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.333624 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
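Here the failure loop resolves: the plugin watcher picked up the driver's registration socket under /var/lib/kubelet/plugins_registry at 09:38:19.352443, and at 09:38:20.135511-135544 the kubelet validated and registered kubevirt.io.hostpath-provisioner, after which the pending TearDown succeeds (09:38:20.236470) and MountDevice is skipped because this driver does not advertise the STAGE_UNSTAGE_VOLUME capability. A quick, illustrative way to see what the watcher sees is to list that directory on the node; a minimal sketch assuming only the registry path taken from the log:

    package main

    import (
    	"fmt"
    	"log"
    	"os"
    )

    // Lists the registration sockets the kubelet's plugin watcher scans.
    // Once the driver's registrar is up, expect an entry like
    // "kubevirt.io.hostpath-provisioner-reg.sock".
    func main() {
    	entries, err := os.ReadDir("/var/lib/kubelet/plugins_registry")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, e := range entries {
    		fmt.Println(e.Name())
    	}
    }

Absence of the expected socket here would point at the driver's registrar container (part of csi-hostpathplugin-pxbm7 in this log) rather than at the kubelet.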
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.333652 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.343952 4854 patch_prober.go:28] interesting pod/downloads-7954f5f757-jvdld container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.344029 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jvdld" podUID="a2d7193d-eb53-446a-a96c-49d28dbbe724" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.344526 4854 patch_prober.go:28] interesting pod/downloads-7954f5f757-jvdld container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.344555 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-jvdld" podUID="a2d7193d-eb53-446a-a96c-49d28dbbe724" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.373811 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sd29s\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.389523 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s"
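Two distinct probe failure modes appear in this window: the downloads-7954f5f757-jvdld probes fail at the TCP layer (connection refused, nothing listening on 10.217.0.11:8080 yet), while the router's startup probe connects but receives a 500 from its healthz endpoint. The kubelet's HTTP prober counts any status from 200 through 399 as success; a rough sketch of that check, with the URL copied from the log and the client setup (timeout, no redirect handling) assumed:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // probe mimics an HTTP GET probe: transport errors and statuses
    // outside 200-399 are failures. The one-second timeout is an
    // assumption, not a value taken from this log.
    func probe(url string) error {
    	client := &http.Client{Timeout: time.Second}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err // e.g. "dial tcp ...: connect: connection refused"
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
    		return nil
    	}
    	return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
    }

    func main() {
    	if err := probe("http://10.217.0.11:8080/"); err != nil {
    		fmt.Println("Probe failed:", err)
    	}
    }

The distinction matters when reading the records that follow: connection-refused failures usually mean the server has not started, while a 500 from healthz means the process is up but one of its sub-checks (here backend-http and has-synced) has not passed yet.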
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.426533 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ea5d904-b5da-4a4a-9221-f808841b0052-config-volume\") pod \"7ea5d904-b5da-4a4a-9221-f808841b0052\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.426692 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcj2g\" (UniqueName: \"kubernetes.io/projected/7ea5d904-b5da-4a4a-9221-f808841b0052-kube-api-access-jcj2g\") pod \"7ea5d904-b5da-4a4a-9221-f808841b0052\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.426840 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ea5d904-b5da-4a4a-9221-f808841b0052-secret-volume\") pod \"7ea5d904-b5da-4a4a-9221-f808841b0052\" (UID: \"7ea5d904-b5da-4a4a-9221-f808841b0052\") " Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.427729 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ea5d904-b5da-4a4a-9221-f808841b0052-config-volume" (OuterVolumeSpecName: "config-volume") pod "7ea5d904-b5da-4a4a-9221-f808841b0052" (UID: "7ea5d904-b5da-4a4a-9221-f808841b0052"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.433459 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ea5d904-b5da-4a4a-9221-f808841b0052-kube-api-access-jcj2g" (OuterVolumeSpecName: "kube-api-access-jcj2g") pod "7ea5d904-b5da-4a4a-9221-f808841b0052" (UID: "7ea5d904-b5da-4a4a-9221-f808841b0052"). InnerVolumeSpecName "kube-api-access-jcj2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.434617 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ea5d904-b5da-4a4a-9221-f808841b0052-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7ea5d904-b5da-4a4a-9221-f808841b0052" (UID: "7ea5d904-b5da-4a4a-9221-f808841b0052"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.516248 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.528117 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-sb8md" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.528777 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ea5d904-b5da-4a4a-9221-f808841b0052-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.528803 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ea5d904-b5da-4a4a-9221-f808841b0052-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.528813 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcj2g\" (UniqueName: \"kubernetes.io/projected/7ea5d904-b5da-4a4a-9221-f808841b0052-kube-api-access-jcj2g\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.593750 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sd29s"] Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.725254 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8f2r9"] Nov 25 09:38:20 crc kubenswrapper[4854]: E1125 09:38:20.725691 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea5d904-b5da-4a4a-9221-f808841b0052" containerName="collect-profiles" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.725702 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea5d904-b5da-4a4a-9221-f808841b0052" containerName="collect-profiles" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.725806 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea5d904-b5da-4a4a-9221-f808841b0052" containerName="collect-profiles" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.726447 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.736256 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.742030 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8f2r9"] Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.751225 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:38:20 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld Nov 25 09:38:20 crc kubenswrapper[4854]: [+]process-running ok Nov 25 09:38:20 crc kubenswrapper[4854]: healthz check failed Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.751284 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.832899 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqc2t\" (UniqueName: \"kubernetes.io/projected/0e545002-87d9-40ff-bf70-684e7b89f8f1-kube-api-access-lqc2t\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.832969 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-catalog-content\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.833024 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-utilities\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.936315 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-catalog-content\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.936425 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-utilities\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.936511 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqc2t\" (UniqueName: 
\"kubernetes.io/projected/0e545002-87d9-40ff-bf70-684e7b89f8f1-kube-api-access-lqc2t\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.937173 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-catalog-content\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.937366 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-utilities\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:20 crc kubenswrapper[4854]: I1125 09:38:20.959220 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqc2t\" (UniqueName: \"kubernetes.io/projected/0e545002-87d9-40ff-bf70-684e7b89f8f1-kube-api-access-lqc2t\") pod \"redhat-marketplace-8f2r9\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.023247 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.054181 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.138270 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nsfgv"] Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.139380 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.141853 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" event={"ID":"4b64fc1e-475a-4d69-a7ac-a23a7b5a7909","Type":"ContainerStarted","Data":"340dfe49af984b2db5fb07ee3953349ed07c545f9fe64df707326a62dc164d95"} Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.144105 4854 generic.go:334] "Generic (PLEG): container finished" podID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerID="1f69c588fb470d241c361934a908344f127a75c71ac44fe0cd92e7736e8f14f8" exitCode=0 Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.144179 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xq88" event={"ID":"3066e5f3-b1f4-4415-8a74-32f39d2f8926","Type":"ContainerDied","Data":"1f69c588fb470d241c361934a908344f127a75c71ac44fe0cd92e7736e8f14f8"} Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.144209 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xq88" event={"ID":"3066e5f3-b1f4-4415-8a74-32f39d2f8926","Type":"ContainerStarted","Data":"e7a66ad9be5b5e3ab5c06fe977aadf60f5a9c9d6c15d6f999887a7dea80e812a"} Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.152343 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nsfgv"] Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.152847 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" event={"ID":"7ea5d904-b5da-4a4a-9221-f808841b0052","Type":"ContainerDied","Data":"2c6b63829b21d56c72abdc68f17f1cd0aa810d6a31a0d2cd72f5bae5cfa79855"} Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.152892 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c6b63829b21d56c72abdc68f17f1cd0aa810d6a31a0d2cd72f5bae5cfa79855" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.152998 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.159536 4854 generic.go:334] "Generic (PLEG): container finished" podID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerID="743b0bb31af3221c8ff6d9fb7e9bdcfe848e2ff5ad8c680626a7f9f9273a3994" exitCode=0 Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.159597 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkb9k" event={"ID":"e66197a5-5610-4f03-bfb3-4952c7d530e2","Type":"ContainerDied","Data":"743b0bb31af3221c8ff6d9fb7e9bdcfe848e2ff5ad8c680626a7f9f9273a3994"} Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.163804 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" event={"ID":"974e5caf-5513-4e3e-b8f6-cf67c37b12bb","Type":"ContainerStarted","Data":"8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e"} Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.163854 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" event={"ID":"974e5caf-5513-4e3e-b8f6-cf67c37b12bb","Type":"ContainerStarted","Data":"0a00556ecfc0ffa73665188d46ce8b7323ff0249feafef3477d548c58835c3f0"} Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.164117 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.241339 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" podStartSLOduration=13.241317353 podStartE2EDuration="13.241317353s" podCreationTimestamp="2025-11-25 09:38:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:21.235240251 +0000 UTC m=+107.088233627" watchObservedRunningTime="2025-11-25 09:38:21.241317353 +0000 UTC m=+107.094310739" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.273012 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" podStartSLOduration=85.272995469 podStartE2EDuration="1m25.272995469s" podCreationTimestamp="2025-11-25 09:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:38:21.263765168 +0000 UTC m=+107.116758554" watchObservedRunningTime="2025-11-25 09:38:21.272995469 +0000 UTC m=+107.125988835" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.302966 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.307333 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5hvjb" Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.346261 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-catalog-content\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:38:21 crc kubenswrapper[4854]: 
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.346539 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx672\" (UniqueName: \"kubernetes.io/projected/51de5364-478f-4774-a9ed-230222a4a161-kube-api-access-dx672\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.364969 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.447372 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx672\" (UniqueName: \"kubernetes.io/projected/51de5364-478f-4774-a9ed-230222a4a161-kube-api-access-dx672\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.447426 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-catalog-content\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.447448 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-utilities\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.448384 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-utilities\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.449170 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-catalog-content\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.464409 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx672\" (UniqueName: \"kubernetes.io/projected/51de5364-478f-4774-a9ed-230222a4a161-kube-api-access-dx672\") pod \"redhat-marketplace-nsfgv\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.476121 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.488606 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nsfgv"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.557304 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8f2r9"]
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.651387 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kube-api-access\") pod \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") "
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.651723 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kubelet-dir\") pod \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\" (UID: \"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c\") "
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.651948 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0edb61e7-9266-4c0b-a53b-7a50c7e80a9c" (UID: "0edb61e7-9266-4c0b-a53b-7a50c7e80a9c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.662881 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0edb61e7-9266-4c0b-a53b-7a50c7e80a9c" (UID: "0edb61e7-9266-4c0b-a53b-7a50c7e80a9c"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.747375 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-vc2n9"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.750872 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:38:21 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld
Nov 25 09:38:21 crc kubenswrapper[4854]: [+]process-running ok
Nov 25 09:38:21 crc kubenswrapper[4854]: healthz check failed
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.750935 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.752884 4854 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.752909 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0edb61e7-9266-4c0b-a53b-7a50c7e80a9c-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.763872 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nsfgv"]
Nov 25 09:38:21 crc kubenswrapper[4854]: W1125 09:38:21.766661 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51de5364_478f_4774_a9ed_230222a4a161.slice/crio-d751951891c941f4f653b3fa78bee85bbca69c1d1102b5590b813c743d5ecf24 WatchSource:0}: Error finding container d751951891c941f4f653b3fa78bee85bbca69c1d1102b5590b813c743d5ecf24: Status 404 returned error can't find the container with id d751951891c941f4f653b3fa78bee85bbca69c1d1102b5590b813c743d5ecf24
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.824195 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-m5689"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.825262 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-m5689"
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.828372 4854 patch_prober.go:28] interesting pod/console-f9d7485db-m5689 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.36:8443/health\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body=
Nov 25 09:38:21 crc kubenswrapper[4854]: I1125 09:38:21.828423 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m5689" podUID="97e545f8-81c2-400b-a339-b2b3a1958492" containerName="console" probeResult="failure" output="Get \"https://10.217.0.36:8443/health\": dial tcp 10.217.0.36:8443: connect: connection refused"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.121349 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 09:38:22 crc kubenswrapper[4854]: E1125 09:38:22.126587 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0edb61e7-9266-4c0b-a53b-7a50c7e80a9c" containerName="pruner"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.126618 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0edb61e7-9266-4c0b-a53b-7a50c7e80a9c" containerName="pruner"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.127077 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="0edb61e7-9266-4c0b-a53b-7a50c7e80a9c" containerName="pruner"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.127629 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.129453 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tdb4p"]
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.133932 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.134473 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.155254 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.155300 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tdb4p"]
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.155494 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tdb4p"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.157993 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.194937 4854 generic.go:334] "Generic (PLEG): container finished" podID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerID="f8aa75e535305910f352e0ad128c403df4369b7a6f79bdfc42a4b66f970d6547" exitCode=0
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.195587 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8f2r9" event={"ID":"0e545002-87d9-40ff-bf70-684e7b89f8f1","Type":"ContainerDied","Data":"f8aa75e535305910f352e0ad128c403df4369b7a6f79bdfc42a4b66f970d6547"}
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.195638 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8f2r9" event={"ID":"0e545002-87d9-40ff-bf70-684e7b89f8f1","Type":"ContainerStarted","Data":"df72a091e6f7e25a61011e34efcf9f4194ab460147da118daee606b9ad38a936"}
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.203683 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"0edb61e7-9266-4c0b-a53b-7a50c7e80a9c","Type":"ContainerDied","Data":"4b37ea32986d4cbb0a24913cbd5311d1fb43cfc8f4089d11682513ac930fe4fb"}
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.203876 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b37ea32986d4cbb0a24913cbd5311d1fb43cfc8f4089d11682513ac930fe4fb"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.207148 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.220201 4854 generic.go:334] "Generic (PLEG): container finished" podID="51de5364-478f-4774-a9ed-230222a4a161" containerID="5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b" exitCode=0
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.220323 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nsfgv" event={"ID":"51de5364-478f-4774-a9ed-230222a4a161","Type":"ContainerDied","Data":"5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b"}
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.220347 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nsfgv" event={"ID":"51de5364-478f-4774-a9ed-230222a4a161","Type":"ContainerStarted","Data":"d751951891c941f4f653b3fa78bee85bbca69c1d1102b5590b813c743d5ecf24"}
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.269938 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-utilities\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.269981 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-catalog-content\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.270043 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0486c15c-f0e4-4012-897f-7d574a054e02-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.270059 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0486c15c-f0e4-4012-897f-7d574a054e02-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.270096 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9zrs\" (UniqueName: \"kubernetes.io/projected/91299a1e-6ae1-44b2-ade3-912ba1568cfb-kube-api-access-n9zrs\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.371425 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-utilities\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p"
Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.371512 4854 reconciler_common.go:218]
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-catalog-content\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.371714 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0486c15c-f0e4-4012-897f-7d574a054e02-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.371744 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0486c15c-f0e4-4012-897f-7d574a054e02-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.371811 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9zrs\" (UniqueName: \"kubernetes.io/projected/91299a1e-6ae1-44b2-ade3-912ba1568cfb-kube-api-access-n9zrs\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.373360 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0486c15c-f0e4-4012-897f-7d574a054e02-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.375321 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-catalog-content\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.376408 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-utilities\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.389900 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0486c15c-f0e4-4012-897f-7d574a054e02-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.392515 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9zrs\" (UniqueName: \"kubernetes.io/projected/91299a1e-6ae1-44b2-ade3-912ba1568cfb-kube-api-access-n9zrs\") pod \"redhat-operators-tdb4p\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.461072 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.482661 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.526986 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w2t47"] Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.543219 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w2t47"] Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.543375 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.677053 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-utilities\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.677397 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xvk5\" (UniqueName: \"kubernetes.io/projected/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-kube-api-access-7xvk5\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.677444 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-catalog-content\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.753001 4854 patch_prober.go:28] interesting pod/router-default-5444994796-vc2n9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:38:22 crc kubenswrapper[4854]: [-]has-synced failed: reason withheld Nov 25 09:38:22 crc kubenswrapper[4854]: [+]process-running ok Nov 25 09:38:22 crc kubenswrapper[4854]: healthz check failed Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.753070 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vc2n9" podUID="1e52d13d-15dd-437f-8d2d-88709419d1f2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.780249 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-utilities\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.780313 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xvk5\" (UniqueName: \"kubernetes.io/projected/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-kube-api-access-7xvk5\") pod \"redhat-operators-w2t47\" (UID: 
\"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.780358 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-catalog-content\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.781349 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-catalog-content\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.781367 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-utilities\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.800836 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xvk5\" (UniqueName: \"kubernetes.io/projected/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-kube-api-access-7xvk5\") pod \"redhat-operators-w2t47\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:22 crc kubenswrapper[4854]: I1125 09:38:22.863972 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:38:23 crc kubenswrapper[4854]: I1125 09:38:22.997863 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tdb4p"] Nov 25 09:38:23 crc kubenswrapper[4854]: W1125 09:38:23.025686 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91299a1e_6ae1_44b2_ade3_912ba1568cfb.slice/crio-90d7db9b1e4ddcb38cf29bef6d4659636daf50e18b07831cf64354ee974738d5 WatchSource:0}: Error finding container 90d7db9b1e4ddcb38cf29bef6d4659636daf50e18b07831cf64354ee974738d5: Status 404 returned error can't find the container with id 90d7db9b1e4ddcb38cf29bef6d4659636daf50e18b07831cf64354ee974738d5 Nov 25 09:38:23 crc kubenswrapper[4854]: I1125 09:38:23.071049 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 09:38:23 crc kubenswrapper[4854]: I1125 09:38:23.158962 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w2t47"] Nov 25 09:38:23 crc kubenswrapper[4854]: W1125 09:38:23.275601 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d3d0ba6_9ff0_4c6e_9ecf_e7e67e7b6909.slice/crio-a696661bb93d17a5762e135de9f808f2b1b96e17852d6a906788de8e3f28e0db WatchSource:0}: Error finding container a696661bb93d17a5762e135de9f808f2b1b96e17852d6a906788de8e3f28e0db: Status 404 returned error can't find the container with id a696661bb93d17a5762e135de9f808f2b1b96e17852d6a906788de8e3f28e0db Nov 25 09:38:23 crc kubenswrapper[4854]: I1125 09:38:23.295223 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0486c15c-f0e4-4012-897f-7d574a054e02","Type":"ContainerStarted","Data":"370d34be413c1d3344497f8035ddbeff00029092300168dfa47603a2ac373972"} Nov 25 09:38:23 crc kubenswrapper[4854]: I1125 09:38:23.298426 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tdb4p" event={"ID":"91299a1e-6ae1-44b2-ade3-912ba1568cfb","Type":"ContainerStarted","Data":"90d7db9b1e4ddcb38cf29bef6d4659636daf50e18b07831cf64354ee974738d5"} Nov 25 09:38:23 crc kubenswrapper[4854]: I1125 09:38:23.767443 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:23 crc kubenswrapper[4854]: I1125 09:38:23.772573 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-vc2n9" Nov 25 09:38:24 crc kubenswrapper[4854]: I1125 09:38:24.334777 4854 generic.go:334] "Generic (PLEG): container finished" podID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerID="7772755a3bee8624b514939238310cb34e3eaf41ea96f4c1e996501d4c877762" exitCode=0 Nov 25 09:38:24 crc kubenswrapper[4854]: I1125 09:38:24.334876 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tdb4p" event={"ID":"91299a1e-6ae1-44b2-ade3-912ba1568cfb","Type":"ContainerDied","Data":"7772755a3bee8624b514939238310cb34e3eaf41ea96f4c1e996501d4c877762"} Nov 25 09:38:24 crc kubenswrapper[4854]: I1125 09:38:24.340402 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0486c15c-f0e4-4012-897f-7d574a054e02","Type":"ContainerStarted","Data":"40ef7eefd2c26345fb5f4f89530c5ed563ea4b8fa693f8956b2040325756c912"} Nov 25 09:38:24 crc kubenswrapper[4854]: I1125 09:38:24.343648 4854 generic.go:334] "Generic (PLEG): container finished" podID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerID="56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6" exitCode=0 Nov 25 09:38:24 crc kubenswrapper[4854]: I1125 09:38:24.344105 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2t47" event={"ID":"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909","Type":"ContainerDied","Data":"56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6"} Nov 25 09:38:24 crc kubenswrapper[4854]: I1125 09:38:24.344168 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2t47" event={"ID":"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909","Type":"ContainerStarted","Data":"a696661bb93d17a5762e135de9f808f2b1b96e17852d6a906788de8e3f28e0db"} Nov 25 09:38:25 crc kubenswrapper[4854]: I1125 09:38:25.354361 4854 generic.go:334] "Generic (PLEG): container finished" podID="0486c15c-f0e4-4012-897f-7d574a054e02" containerID="40ef7eefd2c26345fb5f4f89530c5ed563ea4b8fa693f8956b2040325756c912" exitCode=0 Nov 25 09:38:25 crc kubenswrapper[4854]: I1125 09:38:25.354567 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0486c15c-f0e4-4012-897f-7d574a054e02","Type":"ContainerDied","Data":"40ef7eefd2c26345fb5f4f89530c5ed563ea4b8fa693f8956b2040325756c912"} Nov 25 09:38:26 crc kubenswrapper[4854]: I1125 09:38:26.579104 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-4nz5q" Nov 25 09:38:30 crc kubenswrapper[4854]: I1125 09:38:30.345046 4854 patch_prober.go:28] interesting 
pod/downloads-7954f5f757-jvdld container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 09:38:30 crc kubenswrapper[4854]: I1125 09:38:30.345245 4854 patch_prober.go:28] interesting pod/downloads-7954f5f757-jvdld container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 09:38:30 crc kubenswrapper[4854]: I1125 09:38:30.345316 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-jvdld" podUID="a2d7193d-eb53-446a-a96c-49d28dbbe724" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 09:38:30 crc kubenswrapper[4854]: I1125 09:38:30.345330 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jvdld" podUID="a2d7193d-eb53-446a-a96c-49d28dbbe724" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 09:38:31 crc kubenswrapper[4854]: I1125 09:38:31.827288 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:31 crc kubenswrapper[4854]: I1125 09:38:31.831348 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:38:38 crc kubenswrapper[4854]: I1125 09:38:38.919717 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:38:38 crc kubenswrapper[4854]: I1125 09:38:38.963947 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0486c15c-f0e4-4012-897f-7d574a054e02-kubelet-dir\") pod \"0486c15c-f0e4-4012-897f-7d574a054e02\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " Nov 25 09:38:38 crc kubenswrapper[4854]: I1125 09:38:38.964021 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0486c15c-f0e4-4012-897f-7d574a054e02-kube-api-access\") pod \"0486c15c-f0e4-4012-897f-7d574a054e02\" (UID: \"0486c15c-f0e4-4012-897f-7d574a054e02\") " Nov 25 09:38:38 crc kubenswrapper[4854]: I1125 09:38:38.964144 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0486c15c-f0e4-4012-897f-7d574a054e02-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0486c15c-f0e4-4012-897f-7d574a054e02" (UID: "0486c15c-f0e4-4012-897f-7d574a054e02"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:38:38 crc kubenswrapper[4854]: I1125 09:38:38.964299 4854 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0486c15c-f0e4-4012-897f-7d574a054e02-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:38 crc kubenswrapper[4854]: I1125 09:38:38.974931 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0486c15c-f0e4-4012-897f-7d574a054e02-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0486c15c-f0e4-4012-897f-7d574a054e02" (UID: "0486c15c-f0e4-4012-897f-7d574a054e02"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:38:39 crc kubenswrapper[4854]: I1125 09:38:39.065165 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0486c15c-f0e4-4012-897f-7d574a054e02-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:39 crc kubenswrapper[4854]: I1125 09:38:39.448642 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0486c15c-f0e4-4012-897f-7d574a054e02","Type":"ContainerDied","Data":"370d34be413c1d3344497f8035ddbeff00029092300168dfa47603a2ac373972"} Nov 25 09:38:39 crc kubenswrapper[4854]: I1125 09:38:39.448688 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:38:39 crc kubenswrapper[4854]: I1125 09:38:39.448701 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="370d34be413c1d3344497f8035ddbeff00029092300168dfa47603a2ac373972" Nov 25 09:38:40 crc kubenswrapper[4854]: I1125 09:38:40.373904 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-jvdld" Nov 25 09:38:40 crc kubenswrapper[4854]: I1125 09:38:40.404506 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:38:51 crc kubenswrapper[4854]: I1125 09:38:51.454002 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-6c7r5" Nov 25 09:38:51 crc kubenswrapper[4854]: E1125 09:38:51.852573 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 09:38:51 crc kubenswrapper[4854]: E1125 09:38:51.852758 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5ml9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-zznfv_openshift-marketplace(621fc295-6eae-4091-950e-c883d64bf7b8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:38:51 crc kubenswrapper[4854]: E1125 09:38:51.853917 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-zznfv" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" Nov 25 09:38:57 crc kubenswrapper[4854]: E1125 09:38:57.261576 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-zznfv" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" Nov 25 09:39:02 crc kubenswrapper[4854]: E1125 09:39:02.336569 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 09:39:02 crc kubenswrapper[4854]: E1125 09:39:02.336866 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fmnhp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-nkb9k_openshift-marketplace(e66197a5-5610-4f03-bfb3-4952c7d530e2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:39:02 crc kubenswrapper[4854]: E1125 09:39:02.338086 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-nkb9k" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" Nov 25 09:39:02 crc kubenswrapper[4854]: E1125 09:39:02.362347 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 09:39:02 crc kubenswrapper[4854]: E1125 09:39:02.362556 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5wvsk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-64qr8_openshift-marketplace(b66bda32-eed5-4ea0-b10c-065038dce52d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:39:02 crc kubenswrapper[4854]: E1125 09:39:02.363780 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-64qr8" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.005885 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.006583 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.006716 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.006766 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.007971 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.008900 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.009004 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.018919 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.023347 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.039250 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.039851 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.061277 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.335919 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.393453 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:39:04 crc kubenswrapper[4854]: I1125 09:39:04.669765 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.562176 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-nkb9k" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.562176 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-64qr8" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.593815 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.594455 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n9zrs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-tdb4p_openshift-marketplace(91299a1e-6ae1-44b2-ade3-912ba1568cfb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.596016 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-tdb4p" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 
09:39:07.603708 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.603875 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7xvk5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-w2t47_openshift-marketplace(9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.605030 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-w2t47" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.618505 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.618861 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ds4rn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-4xq88_openshift-marketplace(3066e5f3-b1f4-4415-8a74-32f39d2f8926): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:39:07 crc kubenswrapper[4854]: E1125 09:39:07.620033 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-4xq88" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" Nov 25 09:39:08 crc kubenswrapper[4854]: E1125 09:39:08.666730 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-4xq88" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" Nov 25 09:39:09 crc kubenswrapper[4854]: W1125 09:39:09.116802 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-34b29bf1b7b7fc85ab088ce6553927dae8beb2732ade3942f3c9b31d7cc3136c WatchSource:0}: Error finding container 34b29bf1b7b7fc85ab088ce6553927dae8beb2732ade3942f3c9b31d7cc3136c: Status 404 returned error can't find the container with id 34b29bf1b7b7fc85ab088ce6553927dae8beb2732ade3942f3c9b31d7cc3136c Nov 25 09:39:09 crc kubenswrapper[4854]: I1125 09:39:09.627298 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ecd03e4a9e20ce1c428030e24c58c5d6d9e6b5dca576a7999cf110b13051246e"} Nov 25 09:39:09 crc kubenswrapper[4854]: I1125 09:39:09.628305 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f6121a5470c025212e1a00ce32e1805c3335fba796eba58b79025e02a1129de7"} Nov 25 09:39:09 crc kubenswrapper[4854]: I1125 09:39:09.629225 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"34b29bf1b7b7fc85ab088ce6553927dae8beb2732ade3942f3c9b31d7cc3136c"} Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.642444 4854 generic.go:334] "Generic (PLEG): container finished" podID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerID="ac8d56469bad5d7fe4e331d0a7d9a30c219ffa7ac85b193d4f7fba92d3b85fd3" exitCode=0 Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.642535 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8f2r9" event={"ID":"0e545002-87d9-40ff-bf70-684e7b89f8f1","Type":"ContainerDied","Data":"ac8d56469bad5d7fe4e331d0a7d9a30c219ffa7ac85b193d4f7fba92d3b85fd3"} Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.644755 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"149096cfd6cf08a2322985bb7383800f03ba378fc5aaaaeaed2862329a4f8969"} Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.649802 4854 generic.go:334] "Generic (PLEG): container finished" podID="51de5364-478f-4774-a9ed-230222a4a161" containerID="a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca" exitCode=0 Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.649874 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nsfgv" event={"ID":"51de5364-478f-4774-a9ed-230222a4a161","Type":"ContainerDied","Data":"a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca"} Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.652622 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"5ecefe314597e7fd8480977f3ed1f7e974bfc3183c410b3cf7224c9720cf73f1"} Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.654269 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"685688dcfe629bf978d2e823896bd468c61e6701ae370a020b120d4e7336c24a"} Nov 25 09:39:10 crc kubenswrapper[4854]: I1125 09:39:10.654811 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:39:11 crc kubenswrapper[4854]: I1125 09:39:11.661641 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nsfgv" event={"ID":"51de5364-478f-4774-a9ed-230222a4a161","Type":"ContainerStarted","Data":"7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260"} Nov 25 09:39:11 crc kubenswrapper[4854]: I1125 09:39:11.684075 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nsfgv" podStartSLOduration=1.576558181 podStartE2EDuration="50.684060219s" podCreationTimestamp="2025-11-25 09:38:21 +0000 UTC" firstStartedPulling="2025-11-25 09:38:22.224230175 +0000 
UTC m=+108.077223551" lastFinishedPulling="2025-11-25 09:39:11.331732213 +0000 UTC m=+157.184725589" observedRunningTime="2025-11-25 09:39:11.682838282 +0000 UTC m=+157.535831658" watchObservedRunningTime="2025-11-25 09:39:11.684060219 +0000 UTC m=+157.537053595" Nov 25 09:39:12 crc kubenswrapper[4854]: I1125 09:39:12.669789 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8f2r9" event={"ID":"0e545002-87d9-40ff-bf70-684e7b89f8f1","Type":"ContainerStarted","Data":"7299c0b17f8e0eff3480a90090e6f01be87a8996e09cc854c75e7620b66678f2"} Nov 25 09:39:12 crc kubenswrapper[4854]: I1125 09:39:12.689335 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8f2r9" podStartSLOduration=3.223172085 podStartE2EDuration="52.689319579s" podCreationTimestamp="2025-11-25 09:38:20 +0000 UTC" firstStartedPulling="2025-11-25 09:38:22.199055693 +0000 UTC m=+108.052049069" lastFinishedPulling="2025-11-25 09:39:11.665203177 +0000 UTC m=+157.518196563" observedRunningTime="2025-11-25 09:39:12.687502424 +0000 UTC m=+158.540495810" watchObservedRunningTime="2025-11-25 09:39:12.689319579 +0000 UTC m=+158.542312955" Nov 25 09:39:14 crc kubenswrapper[4854]: I1125 09:39:14.685588 4854 generic.go:334] "Generic (PLEG): container finished" podID="621fc295-6eae-4091-950e-c883d64bf7b8" containerID="c336667d04e1d4b289090caec8580055312b8949a763deb4590ebf4cfca831ec" exitCode=0 Nov 25 09:39:14 crc kubenswrapper[4854]: I1125 09:39:14.685700 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zznfv" event={"ID":"621fc295-6eae-4091-950e-c883d64bf7b8","Type":"ContainerDied","Data":"c336667d04e1d4b289090caec8580055312b8949a763deb4590ebf4cfca831ec"} Nov 25 09:39:16 crc kubenswrapper[4854]: I1125 09:39:16.698819 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zznfv" event={"ID":"621fc295-6eae-4091-950e-c883d64bf7b8","Type":"ContainerStarted","Data":"0b266a7c7a8f3f9c4fcfa6fb91d96e904f36fb12723ea8b7179aa292e7238eb0"} Nov 25 09:39:16 crc kubenswrapper[4854]: I1125 09:39:16.717912 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zznfv" podStartSLOduration=2.435608509 podStartE2EDuration="58.717896199s" podCreationTimestamp="2025-11-25 09:38:18 +0000 UTC" firstStartedPulling="2025-11-25 09:38:20.101578067 +0000 UTC m=+105.954571443" lastFinishedPulling="2025-11-25 09:39:16.383865757 +0000 UTC m=+162.236859133" observedRunningTime="2025-11-25 09:39:16.714787315 +0000 UTC m=+162.567780691" watchObservedRunningTime="2025-11-25 09:39:16.717896199 +0000 UTC m=+162.570889575" Nov 25 09:39:19 crc kubenswrapper[4854]: I1125 09:39:19.334532 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:39:19 crc kubenswrapper[4854]: I1125 09:39:19.335146 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:39:19 crc kubenswrapper[4854]: I1125 09:39:19.560902 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:39:21 crc kubenswrapper[4854]: I1125 09:39:21.055026 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:39:21 crc kubenswrapper[4854]: 
I1125 09:39:21.055336 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:39:21 crc kubenswrapper[4854]: I1125 09:39:21.100722 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:39:21 crc kubenswrapper[4854]: I1125 09:39:21.492932 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:39:21 crc kubenswrapper[4854]: I1125 09:39:21.492986 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:39:21 crc kubenswrapper[4854]: I1125 09:39:21.535329 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:39:21 crc kubenswrapper[4854]: I1125 09:39:21.769383 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:39:21 crc kubenswrapper[4854]: I1125 09:39:21.774971 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:39:22 crc kubenswrapper[4854]: I1125 09:39:22.737704 4854 generic.go:334] "Generic (PLEG): container finished" podID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerID="4408381e59976b55ecd855722fb0c3f83cad6292a641ee0aa28cd7be1876e67c" exitCode=0 Nov 25 09:39:22 crc kubenswrapper[4854]: I1125 09:39:22.737782 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tdb4p" event={"ID":"91299a1e-6ae1-44b2-ade3-912ba1568cfb","Type":"ContainerDied","Data":"4408381e59976b55ecd855722fb0c3f83cad6292a641ee0aa28cd7be1876e67c"} Nov 25 09:39:22 crc kubenswrapper[4854]: I1125 09:39:22.742382 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64qr8" event={"ID":"b66bda32-eed5-4ea0-b10c-065038dce52d","Type":"ContainerStarted","Data":"34c18f897940fee0125e2e2da32f9d306d785d62c9a7d595537c21917a6b50ee"} Nov 25 09:39:23 crc kubenswrapper[4854]: I1125 09:39:23.564026 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nsfgv"] Nov 25 09:39:23 crc kubenswrapper[4854]: I1125 09:39:23.748337 4854 generic.go:334] "Generic (PLEG): container finished" podID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerID="34c18f897940fee0125e2e2da32f9d306d785d62c9a7d595537c21917a6b50ee" exitCode=0 Nov 25 09:39:23 crc kubenswrapper[4854]: I1125 09:39:23.748398 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64qr8" event={"ID":"b66bda32-eed5-4ea0-b10c-065038dce52d","Type":"ContainerDied","Data":"34c18f897940fee0125e2e2da32f9d306d785d62c9a7d595537c21917a6b50ee"} Nov 25 09:39:23 crc kubenswrapper[4854]: I1125 09:39:23.748566 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nsfgv" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="registry-server" containerID="cri-o://7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260" gracePeriod=2 Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.301061 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.409090 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx672\" (UniqueName: \"kubernetes.io/projected/51de5364-478f-4774-a9ed-230222a4a161-kube-api-access-dx672\") pod \"51de5364-478f-4774-a9ed-230222a4a161\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.409146 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-catalog-content\") pod \"51de5364-478f-4774-a9ed-230222a4a161\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.409201 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-utilities\") pod \"51de5364-478f-4774-a9ed-230222a4a161\" (UID: \"51de5364-478f-4774-a9ed-230222a4a161\") " Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.410095 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-utilities" (OuterVolumeSpecName: "utilities") pod "51de5364-478f-4774-a9ed-230222a4a161" (UID: "51de5364-478f-4774-a9ed-230222a4a161"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.415577 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51de5364-478f-4774-a9ed-230222a4a161-kube-api-access-dx672" (OuterVolumeSpecName: "kube-api-access-dx672") pod "51de5364-478f-4774-a9ed-230222a4a161" (UID: "51de5364-478f-4774-a9ed-230222a4a161"). InnerVolumeSpecName "kube-api-access-dx672". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.426529 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51de5364-478f-4774-a9ed-230222a4a161" (UID: "51de5364-478f-4774-a9ed-230222a4a161"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.510509 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx672\" (UniqueName: \"kubernetes.io/projected/51de5364-478f-4774-a9ed-230222a4a161-kube-api-access-dx672\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.510550 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.510561 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51de5364-478f-4774-a9ed-230222a4a161-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.778172 4854 generic.go:334] "Generic (PLEG): container finished" podID="51de5364-478f-4774-a9ed-230222a4a161" containerID="7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260" exitCode=0 Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.778234 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nsfgv" event={"ID":"51de5364-478f-4774-a9ed-230222a4a161","Type":"ContainerDied","Data":"7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260"} Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.778260 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nsfgv" event={"ID":"51de5364-478f-4774-a9ed-230222a4a161","Type":"ContainerDied","Data":"d751951891c941f4f653b3fa78bee85bbca69c1d1102b5590b813c743d5ecf24"} Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.778279 4854 scope.go:117] "RemoveContainer" containerID="7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.778370 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nsfgv" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.785660 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tdb4p" event={"ID":"91299a1e-6ae1-44b2-ade3-912ba1568cfb","Type":"ContainerStarted","Data":"c2aab0c89795a8bdfc8f68ad97648efcec063ecddba5f253cc2842b5261fd717"} Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.802221 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tdb4p" podStartSLOduration=9.363900135 podStartE2EDuration="1m2.802202601s" podCreationTimestamp="2025-11-25 09:38:22 +0000 UTC" firstStartedPulling="2025-11-25 09:38:30.343994771 +0000 UTC m=+116.196988157" lastFinishedPulling="2025-11-25 09:39:23.782297247 +0000 UTC m=+169.635290623" observedRunningTime="2025-11-25 09:39:24.801439437 +0000 UTC m=+170.654432823" watchObservedRunningTime="2025-11-25 09:39:24.802202601 +0000 UTC m=+170.655195977" Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.824565 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nsfgv"] Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.827741 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nsfgv"] Nov 25 09:39:24 crc kubenswrapper[4854]: I1125 09:39:24.882115 4854 scope.go:117] "RemoveContainer" containerID="a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca" Nov 25 09:39:25 crc kubenswrapper[4854]: I1125 09:39:25.019648 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51de5364-478f-4774-a9ed-230222a4a161" path="/var/lib/kubelet/pods/51de5364-478f-4774-a9ed-230222a4a161/volumes" Nov 25 09:39:25 crc kubenswrapper[4854]: I1125 09:39:25.028914 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:39:25 crc kubenswrapper[4854]: I1125 09:39:25.028961 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:39:25 crc kubenswrapper[4854]: I1125 09:39:25.381750 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-62dll"] Nov 25 09:39:25 crc kubenswrapper[4854]: I1125 09:39:25.955490 4854 scope.go:117] "RemoveContainer" containerID="5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b" Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.345547 4854 scope.go:117] "RemoveContainer" containerID="7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260" Nov 25 09:39:26 crc kubenswrapper[4854]: E1125 09:39:26.345948 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260\": container with ID starting with 7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260 not found: ID does not exist" containerID="7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260" 
Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.345994 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260"} err="failed to get container status \"7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260\": rpc error: code = NotFound desc = could not find container \"7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260\": container with ID starting with 7eefbabf1a41e2d9d5fb29e0f3686193f8c552bc941a38c90d098b70ec0a7260 not found: ID does not exist" Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.346053 4854 scope.go:117] "RemoveContainer" containerID="a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca" Nov 25 09:39:26 crc kubenswrapper[4854]: E1125 09:39:26.346337 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca\": container with ID starting with a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca not found: ID does not exist" containerID="a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca" Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.346376 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca"} err="failed to get container status \"a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca\": rpc error: code = NotFound desc = could not find container \"a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca\": container with ID starting with a4288b63aa261605d420f73c0041eb0c6895d99eadb465a62750e5c62d520cca not found: ID does not exist" Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.346398 4854 scope.go:117] "RemoveContainer" containerID="5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b" Nov 25 09:39:26 crc kubenswrapper[4854]: E1125 09:39:26.346944 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b\": container with ID starting with 5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b not found: ID does not exist" containerID="5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b" Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.346991 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b"} err="failed to get container status \"5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b\": rpc error: code = NotFound desc = could not find container \"5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b\": container with ID starting with 5d1d7370dc33ea90c516897cf9f2780e07b9929e89f90ba521b24fa22b82f99b not found: ID does not exist" Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.796976 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2t47" event={"ID":"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909","Type":"ContainerStarted","Data":"60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f"} Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.798630 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-nkb9k" event={"ID":"e66197a5-5610-4f03-bfb3-4952c7d530e2","Type":"ContainerStarted","Data":"11a4433286e284d168e60355365205db2f28cb8a76522e109191ac0ce5331a9f"} Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.801218 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xq88" event={"ID":"3066e5f3-b1f4-4415-8a74-32f39d2f8926","Type":"ContainerStarted","Data":"353202a71f8c340411ff5012809f2bd5be751aa36304760e7165091a18969570"} Nov 25 09:39:26 crc kubenswrapper[4854]: I1125 09:39:26.802827 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64qr8" event={"ID":"b66bda32-eed5-4ea0-b10c-065038dce52d","Type":"ContainerStarted","Data":"850d06e0bede6373eb00a6690de9f278ccb22a4ee61c555521331d76ee61eaa5"} Nov 25 09:39:27 crc kubenswrapper[4854]: I1125 09:39:27.825381 4854 generic.go:334] "Generic (PLEG): container finished" podID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerID="60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f" exitCode=0 Nov 25 09:39:27 crc kubenswrapper[4854]: I1125 09:39:27.825438 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2t47" event={"ID":"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909","Type":"ContainerDied","Data":"60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f"} Nov 25 09:39:27 crc kubenswrapper[4854]: I1125 09:39:27.830070 4854 generic.go:334] "Generic (PLEG): container finished" podID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerID="11a4433286e284d168e60355365205db2f28cb8a76522e109191ac0ce5331a9f" exitCode=0 Nov 25 09:39:27 crc kubenswrapper[4854]: I1125 09:39:27.830119 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkb9k" event={"ID":"e66197a5-5610-4f03-bfb3-4952c7d530e2","Type":"ContainerDied","Data":"11a4433286e284d168e60355365205db2f28cb8a76522e109191ac0ce5331a9f"} Nov 25 09:39:27 crc kubenswrapper[4854]: I1125 09:39:27.839581 4854 generic.go:334] "Generic (PLEG): container finished" podID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerID="353202a71f8c340411ff5012809f2bd5be751aa36304760e7165091a18969570" exitCode=0 Nov 25 09:39:27 crc kubenswrapper[4854]: I1125 09:39:27.841755 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xq88" event={"ID":"3066e5f3-b1f4-4415-8a74-32f39d2f8926","Type":"ContainerDied","Data":"353202a71f8c340411ff5012809f2bd5be751aa36304760e7165091a18969570"} Nov 25 09:39:27 crc kubenswrapper[4854]: I1125 09:39:27.903009 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-64qr8" podStartSLOduration=3.364848031 podStartE2EDuration="1m9.902992728s" podCreationTimestamp="2025-11-25 09:38:18 +0000 UTC" firstStartedPulling="2025-11-25 09:38:20.084458813 +0000 UTC m=+105.937452189" lastFinishedPulling="2025-11-25 09:39:26.62260351 +0000 UTC m=+172.475596886" observedRunningTime="2025-11-25 09:39:27.900377878 +0000 UTC m=+173.753371264" watchObservedRunningTime="2025-11-25 09:39:27.902992728 +0000 UTC m=+173.755986104" Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.117822 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.118204 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.182105 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.378323 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.851537 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2t47" event={"ID":"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909","Type":"ContainerStarted","Data":"a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8"} Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.853709 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkb9k" event={"ID":"e66197a5-5610-4f03-bfb3-4952c7d530e2","Type":"ContainerStarted","Data":"7af5416643c7f17a2e802c3a06e0354cc9ebae39805cb0b318ee0ad636b9bc87"} Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.855734 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xq88" event={"ID":"3066e5f3-b1f4-4415-8a74-32f39d2f8926","Type":"ContainerStarted","Data":"15cab65cc8debb0d3245d11cac536a4705e1b096e541c6f206cef506e7dc2ba0"} Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.872079 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w2t47" podStartSLOduration=9.102965069 podStartE2EDuration="1m7.872060721s" podCreationTimestamp="2025-11-25 09:38:22 +0000 UTC" firstStartedPulling="2025-11-25 09:38:30.343560159 +0000 UTC m=+116.196553545" lastFinishedPulling="2025-11-25 09:39:29.112655821 +0000 UTC m=+174.965649197" observedRunningTime="2025-11-25 09:39:29.86968441 +0000 UTC m=+175.722677816" watchObservedRunningTime="2025-11-25 09:39:29.872060721 +0000 UTC m=+175.725054097" Nov 25 09:39:29 crc kubenswrapper[4854]: I1125 09:39:29.924612 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4xq88" podStartSLOduration=3.11952007 podStartE2EDuration="1m10.924589563s" podCreationTimestamp="2025-11-25 09:38:19 +0000 UTC" firstStartedPulling="2025-11-25 09:38:21.148748836 +0000 UTC m=+107.001742202" lastFinishedPulling="2025-11-25 09:39:28.953818319 +0000 UTC m=+174.806811695" observedRunningTime="2025-11-25 09:39:29.899238805 +0000 UTC m=+175.752232181" watchObservedRunningTime="2025-11-25 09:39:29.924589563 +0000 UTC m=+175.777582939" Nov 25 09:39:32 crc kubenswrapper[4854]: I1125 09:39:32.483271 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:39:32 crc kubenswrapper[4854]: I1125 09:39:32.484530 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:39:32 crc kubenswrapper[4854]: I1125 09:39:32.529444 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:39:32 crc kubenswrapper[4854]: I1125 09:39:32.549006 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nkb9k" podStartSLOduration=5.655658983 podStartE2EDuration="1m13.548983775s" podCreationTimestamp="2025-11-25 09:38:19 +0000 UTC" 
firstStartedPulling="2025-11-25 09:38:21.161134936 +0000 UTC m=+107.014128312" lastFinishedPulling="2025-11-25 09:39:29.054459728 +0000 UTC m=+174.907453104" observedRunningTime="2025-11-25 09:39:29.92381212 +0000 UTC m=+175.776805496" watchObservedRunningTime="2025-11-25 09:39:32.548983775 +0000 UTC m=+178.401977151" Nov 25 09:39:32 crc kubenswrapper[4854]: I1125 09:39:32.864909 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:39:32 crc kubenswrapper[4854]: I1125 09:39:32.864957 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:39:32 crc kubenswrapper[4854]: I1125 09:39:32.917009 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:39:33 crc kubenswrapper[4854]: I1125 09:39:33.911061 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w2t47" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="registry-server" probeResult="failure" output=< Nov 25 09:39:33 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:39:33 crc kubenswrapper[4854]: > Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.164493 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.458066 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nkb9k" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.458136 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nkb9k" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.492885 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nkb9k" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.674595 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4xq88" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.674636 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4xq88" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.738300 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4xq88" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.950579 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nkb9k" Nov 25 09:39:39 crc kubenswrapper[4854]: I1125 09:39:39.968827 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4xq88" Nov 25 09:39:41 crc kubenswrapper[4854]: I1125 09:39:41.375659 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nkb9k"] Nov 25 09:39:41 crc kubenswrapper[4854]: I1125 09:39:41.917099 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nkb9k" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="registry-server" containerID="cri-o://7af5416643c7f17a2e802c3a06e0354cc9ebae39805cb0b318ee0ad636b9bc87" gracePeriod=2 
Nov 25 09:39:41 crc kubenswrapper[4854]: I1125 09:39:41.962813 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4xq88"] Nov 25 09:39:41 crc kubenswrapper[4854]: I1125 09:39:41.963076 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4xq88" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="registry-server" containerID="cri-o://15cab65cc8debb0d3245d11cac536a4705e1b096e541c6f206cef506e7dc2ba0" gracePeriod=2 Nov 25 09:39:42 crc kubenswrapper[4854]: I1125 09:39:42.902534 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:39:42 crc kubenswrapper[4854]: I1125 09:39:42.935821 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:39:43 crc kubenswrapper[4854]: I1125 09:39:43.771699 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w2t47"] Nov 25 09:39:43 crc kubenswrapper[4854]: I1125 09:39:43.931741 4854 generic.go:334] "Generic (PLEG): container finished" podID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerID="15cab65cc8debb0d3245d11cac536a4705e1b096e541c6f206cef506e7dc2ba0" exitCode=0 Nov 25 09:39:43 crc kubenswrapper[4854]: I1125 09:39:43.931848 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xq88" event={"ID":"3066e5f3-b1f4-4415-8a74-32f39d2f8926","Type":"ContainerDied","Data":"15cab65cc8debb0d3245d11cac536a4705e1b096e541c6f206cef506e7dc2ba0"} Nov 25 09:39:43 crc kubenswrapper[4854]: I1125 09:39:43.937364 4854 generic.go:334] "Generic (PLEG): container finished" podID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerID="7af5416643c7f17a2e802c3a06e0354cc9ebae39805cb0b318ee0ad636b9bc87" exitCode=0 Nov 25 09:39:43 crc kubenswrapper[4854]: I1125 09:39:43.937406 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkb9k" event={"ID":"e66197a5-5610-4f03-bfb3-4952c7d530e2","Type":"ContainerDied","Data":"7af5416643c7f17a2e802c3a06e0354cc9ebae39805cb0b318ee0ad636b9bc87"} Nov 25 09:39:43 crc kubenswrapper[4854]: I1125 09:39:43.937881 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w2t47" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="registry-server" containerID="cri-o://a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8" gracePeriod=2 Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.118769 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.195105 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nkb9k" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.263807 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-catalog-content\") pod \"e66197a5-5610-4f03-bfb3-4952c7d530e2\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.265156 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-utilities\") pod \"e66197a5-5610-4f03-bfb3-4952c7d530e2\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.265188 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmnhp\" (UniqueName: \"kubernetes.io/projected/e66197a5-5610-4f03-bfb3-4952c7d530e2-kube-api-access-fmnhp\") pod \"e66197a5-5610-4f03-bfb3-4952c7d530e2\" (UID: \"e66197a5-5610-4f03-bfb3-4952c7d530e2\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.265882 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-utilities" (OuterVolumeSpecName: "utilities") pod "e66197a5-5610-4f03-bfb3-4952c7d530e2" (UID: "e66197a5-5610-4f03-bfb3-4952c7d530e2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.270781 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e66197a5-5610-4f03-bfb3-4952c7d530e2-kube-api-access-fmnhp" (OuterVolumeSpecName: "kube-api-access-fmnhp") pod "e66197a5-5610-4f03-bfb3-4952c7d530e2" (UID: "e66197a5-5610-4f03-bfb3-4952c7d530e2"). InnerVolumeSpecName "kube-api-access-fmnhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.318763 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e66197a5-5610-4f03-bfb3-4952c7d530e2" (UID: "e66197a5-5610-4f03-bfb3-4952c7d530e2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.366215 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.366240 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e66197a5-5610-4f03-bfb3-4952c7d530e2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.366252 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmnhp\" (UniqueName: \"kubernetes.io/projected/e66197a5-5610-4f03-bfb3-4952c7d530e2-kube-api-access-fmnhp\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.398714 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4xq88" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.467629 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds4rn\" (UniqueName: \"kubernetes.io/projected/3066e5f3-b1f4-4415-8a74-32f39d2f8926-kube-api-access-ds4rn\") pod \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.467748 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-catalog-content\") pod \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.467845 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-utilities\") pod \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\" (UID: \"3066e5f3-b1f4-4415-8a74-32f39d2f8926\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.469354 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-utilities" (OuterVolumeSpecName: "utilities") pod "3066e5f3-b1f4-4415-8a74-32f39d2f8926" (UID: "3066e5f3-b1f4-4415-8a74-32f39d2f8926"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.471367 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3066e5f3-b1f4-4415-8a74-32f39d2f8926-kube-api-access-ds4rn" (OuterVolumeSpecName: "kube-api-access-ds4rn") pod "3066e5f3-b1f4-4415-8a74-32f39d2f8926" (UID: "3066e5f3-b1f4-4415-8a74-32f39d2f8926"). InnerVolumeSpecName "kube-api-access-ds4rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.543512 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3066e5f3-b1f4-4415-8a74-32f39d2f8926" (UID: "3066e5f3-b1f4-4415-8a74-32f39d2f8926"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.569532 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.569577 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds4rn\" (UniqueName: \"kubernetes.io/projected/3066e5f3-b1f4-4415-8a74-32f39d2f8926-kube-api-access-ds4rn\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.569593 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3066e5f3-b1f4-4415-8a74-32f39d2f8926-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.864304 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.944905 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4xq88" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.944904 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xq88" event={"ID":"3066e5f3-b1f4-4415-8a74-32f39d2f8926","Type":"ContainerDied","Data":"e7a66ad9be5b5e3ab5c06fe977aadf60f5a9c9d6c15d6f999887a7dea80e812a"} Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.945046 4854 scope.go:117] "RemoveContainer" containerID="15cab65cc8debb0d3245d11cac536a4705e1b096e541c6f206cef506e7dc2ba0" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.947574 4854 generic.go:334] "Generic (PLEG): container finished" podID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerID="a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8" exitCode=0 Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.947646 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w2t47" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.947699 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2t47" event={"ID":"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909","Type":"ContainerDied","Data":"a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8"} Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.947722 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w2t47" event={"ID":"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909","Type":"ContainerDied","Data":"a696661bb93d17a5762e135de9f808f2b1b96e17852d6a906788de8e3f28e0db"} Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.950886 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nkb9k" event={"ID":"e66197a5-5610-4f03-bfb3-4952c7d530e2","Type":"ContainerDied","Data":"b04699a09c4024f37f92a70f56d520dc9d0464cb1d6898bfbbb41da44b174103"} Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.950992 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nkb9k" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.969117 4854 scope.go:117] "RemoveContainer" containerID="353202a71f8c340411ff5012809f2bd5be751aa36304760e7165091a18969570" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.974025 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xvk5\" (UniqueName: \"kubernetes.io/projected/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-kube-api-access-7xvk5\") pod \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.974110 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-utilities\") pod \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.974145 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-catalog-content\") pod \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\" (UID: \"9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909\") " Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.975638 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-utilities" (OuterVolumeSpecName: "utilities") pod "9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" (UID: "9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.975833 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.982345 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4xq88"] Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.986947 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4xq88"] Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.995305 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-kube-api-access-7xvk5" (OuterVolumeSpecName: "kube-api-access-7xvk5") pod "9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" (UID: "9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909"). InnerVolumeSpecName "kube-api-access-7xvk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.995780 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nkb9k"] Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.997120 4854 scope.go:117] "RemoveContainer" containerID="1f69c588fb470d241c361934a908344f127a75c71ac44fe0cd92e7736e8f14f8" Nov 25 09:39:44 crc kubenswrapper[4854]: I1125 09:39:44.998912 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nkb9k"] Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.009985 4854 scope.go:117] "RemoveContainer" containerID="a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.019989 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" path="/var/lib/kubelet/pods/3066e5f3-b1f4-4415-8a74-32f39d2f8926/volumes" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.020623 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" path="/var/lib/kubelet/pods/e66197a5-5610-4f03-bfb3-4952c7d530e2/volumes" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.021848 4854 scope.go:117] "RemoveContainer" containerID="60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.035364 4854 scope.go:117] "RemoveContainer" containerID="56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.051638 4854 scope.go:117] "RemoveContainer" containerID="a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8" Nov 25 09:39:45 crc kubenswrapper[4854]: E1125 09:39:45.054857 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8\": container with ID starting with a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8 not found: ID does not exist" containerID="a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.054911 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8"} err="failed to get container status \"a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8\": rpc error: code = NotFound desc = could not find container \"a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8\": container with ID starting with a771c987a8dce4a3603964f3ca974466fca594af3481e4bbbda9c1119c4e47b8 not found: ID does not exist" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.054948 4854 scope.go:117] "RemoveContainer" containerID="60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f" Nov 25 09:39:45 crc kubenswrapper[4854]: E1125 09:39:45.055466 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f\": container with ID starting with 60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f not found: ID does not exist" containerID="60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 
09:39:45.055491 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f"} err="failed to get container status \"60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f\": rpc error: code = NotFound desc = could not find container \"60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f\": container with ID starting with 60f5c23cd2467de147b95e9cd43207214ba917c1f3e25938928cf48682b3745f not found: ID does not exist" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.055512 4854 scope.go:117] "RemoveContainer" containerID="56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6" Nov 25 09:39:45 crc kubenswrapper[4854]: E1125 09:39:45.056091 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6\": container with ID starting with 56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6 not found: ID does not exist" containerID="56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.056118 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6"} err="failed to get container status \"56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6\": rpc error: code = NotFound desc = could not find container \"56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6\": container with ID starting with 56291b9783cb25ac4b4ebc9d81e51360f9bd3242596298c3c3e81452a1868fb6 not found: ID does not exist" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.056136 4854 scope.go:117] "RemoveContainer" containerID="7af5416643c7f17a2e802c3a06e0354cc9ebae39805cb0b318ee0ad636b9bc87" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.063975 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" (UID: "9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.079082 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xvk5\" (UniqueName: \"kubernetes.io/projected/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-kube-api-access-7xvk5\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.079112 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.086826 4854 scope.go:117] "RemoveContainer" containerID="11a4433286e284d168e60355365205db2f28cb8a76522e109191ac0ce5331a9f" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.098573 4854 scope.go:117] "RemoveContainer" containerID="743b0bb31af3221c8ff6d9fb7e9bdcfe848e2ff5ad8c680626a7f9f9273a3994" Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.279609 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w2t47"] Nov 25 09:39:45 crc kubenswrapper[4854]: I1125 09:39:45.283616 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w2t47"] Nov 25 09:39:47 crc kubenswrapper[4854]: I1125 09:39:47.020141 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" path="/var/lib/kubelet/pods/9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909/volumes" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.415700 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" podUID="9dab85a1-11f8-45ee-ab81-394ead31aab5" containerName="oauth-openshift" containerID="cri-o://7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b" gracePeriod=15 Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.787733 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947057 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-session\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947123 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-service-ca\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947157 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjds4\" (UniqueName: \"kubernetes.io/projected/9dab85a1-11f8-45ee-ab81-394ead31aab5-kube-api-access-xjds4\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947174 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-policies\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947199 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-error\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947235 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-idp-0-file-data\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947260 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-router-certs\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947287 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-dir\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947307 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-login\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947324 4854 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-serving-cert\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947358 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-provider-selection\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947398 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-cliconfig\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947421 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-trusted-ca-bundle\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947449 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-ocp-branding-template\") pod \"9dab85a1-11f8-45ee-ab81-394ead31aab5\" (UID: \"9dab85a1-11f8-45ee-ab81-394ead31aab5\") " Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.947918 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.949473 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.949516 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.949609 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.950074 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.953571 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dab85a1-11f8-45ee-ab81-394ead31aab5-kube-api-access-xjds4" (OuterVolumeSpecName: "kube-api-access-xjds4") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "kube-api-access-xjds4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.954313 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.954643 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.954988 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.955432 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.959859 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.960067 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.960346 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.961225 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "9dab85a1-11f8-45ee-ab81-394ead31aab5" (UID: "9dab85a1-11f8-45ee-ab81-394ead31aab5"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.992132 4854 generic.go:334] "Generic (PLEG): container finished" podID="9dab85a1-11f8-45ee-ab81-394ead31aab5" containerID="7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b" exitCode=0 Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.992174 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" event={"ID":"9dab85a1-11f8-45ee-ab81-394ead31aab5","Type":"ContainerDied","Data":"7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b"} Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.992191 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.992225 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-62dll" event={"ID":"9dab85a1-11f8-45ee-ab81-394ead31aab5","Type":"ContainerDied","Data":"c7dfb0bbf86fd9bd0e9d6c2a07a0c0704826ff5d674c1581a8ebfe9c8495a6d7"} Nov 25 09:39:50 crc kubenswrapper[4854]: I1125 09:39:50.992277 4854 scope.go:117] "RemoveContainer" containerID="7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.016730 4854 scope.go:117] "RemoveContainer" containerID="7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.017104 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b\": container with ID starting with 7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b not found: ID does not exist" containerID="7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.017152 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b"} err="failed to get container status \"7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b\": rpc error: code = NotFound desc = could not find container \"7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b\": container with ID starting with 7d68198c16d489c23c6fb9d834cbd8fe748f0e9863c701f4826dd2cbcb53874b not found: ID does not exist" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.030949 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-62dll"] Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.033499 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-62dll"] Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048494 4854 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048553 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048573 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048596 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048616 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048633 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048650 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048691 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048709 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048726 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjds4\" (UniqueName: \"kubernetes.io/projected/9dab85a1-11f8-45ee-ab81-394ead31aab5-kube-api-access-xjds4\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048741 4854 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9dab85a1-11f8-45ee-ab81-394ead31aab5-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048757 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048773 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.048789 4854 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9dab85a1-11f8-45ee-ab81-394ead31aab5-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.083973 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-54f75f9d4b-czwzn"] Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084248 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084263 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084276 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dab85a1-11f8-45ee-ab81-394ead31aab5" 
containerName="oauth-openshift" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084286 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dab85a1-11f8-45ee-ab81-394ead31aab5" containerName="oauth-openshift" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084297 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084305 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084315 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084329 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084342 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084349 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084365 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084372 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084384 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084392 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084401 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084408 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084417 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084424 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084434 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084441 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084452 4854 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0486c15c-f0e4-4012-897f-7d574a054e02" containerName="pruner" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084458 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0486c15c-f0e4-4012-897f-7d574a054e02" containerName="pruner" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084466 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084473 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="extract-utilities" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084483 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084490 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: E1125 09:39:51.084501 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084509 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="extract-content" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084634 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e66197a5-5610-4f03-bfb3-4952c7d530e2" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084648 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="51de5364-478f-4774-a9ed-230222a4a161" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084658 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="0486c15c-f0e4-4012-897f-7d574a054e02" containerName="pruner" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084688 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dab85a1-11f8-45ee-ab81-394ead31aab5" containerName="oauth-openshift" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084699 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="3066e5f3-b1f4-4415-8a74-32f39d2f8926" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.084709 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d3d0ba6-9ff0-4c6e-9ecf-e7e67e7b6909" containerName="registry-server" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.085154 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.089034 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.089101 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.089164 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.089311 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.089350 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.089416 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.089917 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.090006 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.094641 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.094843 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.095920 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.096007 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.108038 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54f75f9d4b-czwzn"] Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.120708 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.129129 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.130267 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149451 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-session\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " 
pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149508 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-router-certs\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149535 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149729 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-audit-policies\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149785 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-login\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149886 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149924 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149954 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/06485da6-bec7-4a96-8fb8-3249d6ee88e6-audit-dir\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149972 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-service-ca\") pod 
\"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.149991 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.150013 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.150040 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-error\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.150065 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.150091 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77t5p\" (UniqueName: \"kubernetes.io/projected/06485da6-bec7-4a96-8fb8-3249d6ee88e6-kube-api-access-77t5p\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250543 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-audit-policies\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250612 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-login\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250697 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250723 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250752 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/06485da6-bec7-4a96-8fb8-3249d6ee88e6-audit-dir\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250776 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-service-ca\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250797 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250832 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250856 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-error\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250880 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250905 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77t5p\" (UniqueName: 
\"kubernetes.io/projected/06485da6-bec7-4a96-8fb8-3249d6ee88e6-kube-api-access-77t5p\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250931 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-session\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250957 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-router-certs\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.250979 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.252234 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-audit-policies\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.252303 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/06485da6-bec7-4a96-8fb8-3249d6ee88e6-audit-dir\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.252239 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.252917 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-service-ca\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.254717 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-trusted-ca-bundle\") pod 
\"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.255614 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.256321 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-error\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.256770 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.257031 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.258528 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-user-template-login\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.258529 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-router-certs\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.259081 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-session\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.262452 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/06485da6-bec7-4a96-8fb8-3249d6ee88e6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54f75f9d4b-czwzn\" 
(UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.274100 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77t5p\" (UniqueName: \"kubernetes.io/projected/06485da6-bec7-4a96-8fb8-3249d6ee88e6-kube-api-access-77t5p\") pod \"oauth-openshift-54f75f9d4b-czwzn\" (UID: \"06485da6-bec7-4a96-8fb8-3249d6ee88e6\") " pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.413233 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.639004 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54f75f9d4b-czwzn"] Nov 25 09:39:51 crc kubenswrapper[4854]: I1125 09:39:51.999469 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" event={"ID":"06485da6-bec7-4a96-8fb8-3249d6ee88e6","Type":"ContainerStarted","Data":"079afcc28feca858cb3c991d3bd08b76b3e22fbe1005b856e14e689bd42e3be4"} Nov 25 09:39:52 crc kubenswrapper[4854]: I1125 09:39:51.999828 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" event={"ID":"06485da6-bec7-4a96-8fb8-3249d6ee88e6","Type":"ContainerStarted","Data":"0640cf54491adae63872aa9570f8581d317d27cddb381ef34eff3437175c6db4"} Nov 25 09:39:53 crc kubenswrapper[4854]: I1125 09:39:53.004640 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:53 crc kubenswrapper[4854]: I1125 09:39:53.012732 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" Nov 25 09:39:53 crc kubenswrapper[4854]: I1125 09:39:53.020422 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dab85a1-11f8-45ee-ab81-394ead31aab5" path="/var/lib/kubelet/pods/9dab85a1-11f8-45ee-ab81-394ead31aab5/volumes" Nov 25 09:39:53 crc kubenswrapper[4854]: I1125 09:39:53.025796 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-54f75f9d4b-czwzn" podStartSLOduration=28.025771995 podStartE2EDuration="28.025771995s" podCreationTimestamp="2025-11-25 09:39:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:39:53.024877276 +0000 UTC m=+198.877870652" watchObservedRunningTime="2025-11-25 09:39:53.025771995 +0000 UTC m=+198.878765391" Nov 25 09:39:55 crc kubenswrapper[4854]: I1125 09:39:55.028763 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:39:55 crc kubenswrapper[4854]: I1125 09:39:55.028828 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.802753 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-64qr8"] Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.803423 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-64qr8" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="registry-server" containerID="cri-o://850d06e0bede6373eb00a6690de9f278ccb22a4ee61c555521331d76ee61eaa5" gracePeriod=30 Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.819421 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zznfv"] Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.819765 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zznfv" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="registry-server" containerID="cri-o://0b266a7c7a8f3f9c4fcfa6fb91d96e904f36fb12723ea8b7179aa292e7238eb0" gracePeriod=30 Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.827881 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mz9k6"] Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.828118 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerName="marketplace-operator" containerID="cri-o://7c5ab223884486e83e8b38ca08cd3fd9f322a85ea0a85aa06a26c4a5c089a915" gracePeriod=30 Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.835656 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8f2r9"] Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.835909 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8f2r9" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="registry-server" containerID="cri-o://7299c0b17f8e0eff3480a90090e6f01be87a8996e09cc854c75e7620b66678f2" gracePeriod=30 Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.842716 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-glqlm"] Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.843787 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.849870 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tdb4p"] Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.850190 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tdb4p" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="registry-server" containerID="cri-o://c2aab0c89795a8bdfc8f68ad97648efcec063ecddba5f253cc2842b5261fd717" gracePeriod=30 Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.854954 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-glqlm"] Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.867357 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s295f\" (UniqueName: \"kubernetes.io/projected/a6450a14-7c2a-4c3f-8861-876374e8cb9a-kube-api-access-s295f\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.868486 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a6450a14-7c2a-4c3f-8861-876374e8cb9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.868773 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a6450a14-7c2a-4c3f-8861-876374e8cb9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.972719 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s295f\" (UniqueName: \"kubernetes.io/projected/a6450a14-7c2a-4c3f-8861-876374e8cb9a-kube-api-access-s295f\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.973053 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a6450a14-7c2a-4c3f-8861-876374e8cb9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.973116 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a6450a14-7c2a-4c3f-8861-876374e8cb9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.976319 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a6450a14-7c2a-4c3f-8861-876374e8cb9a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.980556 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a6450a14-7c2a-4c3f-8861-876374e8cb9a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:06 crc kubenswrapper[4854]: I1125 09:40:06.989320 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s295f\" (UniqueName: \"kubernetes.io/projected/a6450a14-7c2a-4c3f-8861-876374e8cb9a-kube-api-access-s295f\") pod \"marketplace-operator-79b997595-glqlm\" (UID: \"a6450a14-7c2a-4c3f-8861-876374e8cb9a\") " pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.080975 4854 generic.go:334] "Generic (PLEG): container finished" podID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerID="7c5ab223884486e83e8b38ca08cd3fd9f322a85ea0a85aa06a26c4a5c089a915" exitCode=0 Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.081032 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" event={"ID":"9117eb3b-0187-4957-b851-da9e4c229c8f","Type":"ContainerDied","Data":"7c5ab223884486e83e8b38ca08cd3fd9f322a85ea0a85aa06a26c4a5c089a915"} Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.085291 4854 generic.go:334] "Generic (PLEG): container finished" podID="621fc295-6eae-4091-950e-c883d64bf7b8" containerID="0b266a7c7a8f3f9c4fcfa6fb91d96e904f36fb12723ea8b7179aa292e7238eb0" exitCode=0 Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.085313 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zznfv" event={"ID":"621fc295-6eae-4091-950e-c883d64bf7b8","Type":"ContainerDied","Data":"0b266a7c7a8f3f9c4fcfa6fb91d96e904f36fb12723ea8b7179aa292e7238eb0"} Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.089129 4854 generic.go:334] "Generic (PLEG): container finished" podID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerID="7299c0b17f8e0eff3480a90090e6f01be87a8996e09cc854c75e7620b66678f2" exitCode=0 Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.089171 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8f2r9" event={"ID":"0e545002-87d9-40ff-bf70-684e7b89f8f1","Type":"ContainerDied","Data":"7299c0b17f8e0eff3480a90090e6f01be87a8996e09cc854c75e7620b66678f2"} Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.096456 4854 generic.go:334] "Generic (PLEG): container finished" podID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerID="c2aab0c89795a8bdfc8f68ad97648efcec063ecddba5f253cc2842b5261fd717" exitCode=0 Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.096521 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tdb4p" event={"ID":"91299a1e-6ae1-44b2-ade3-912ba1568cfb","Type":"ContainerDied","Data":"c2aab0c89795a8bdfc8f68ad97648efcec063ecddba5f253cc2842b5261fd717"} Nov 25 09:40:07 crc 
kubenswrapper[4854]: I1125 09:40:07.099350 4854 generic.go:334] "Generic (PLEG): container finished" podID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerID="850d06e0bede6373eb00a6690de9f278ccb22a4ee61c555521331d76ee61eaa5" exitCode=0 Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.099392 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64qr8" event={"ID":"b66bda32-eed5-4ea0-b10c-065038dce52d","Type":"ContainerDied","Data":"850d06e0bede6373eb00a6690de9f278ccb22a4ee61c555521331d76ee61eaa5"} Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.247157 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.254064 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.276163 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-utilities\") pod \"b66bda32-eed5-4ea0-b10c-065038dce52d\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.276215 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wvsk\" (UniqueName: \"kubernetes.io/projected/b66bda32-eed5-4ea0-b10c-065038dce52d-kube-api-access-5wvsk\") pod \"b66bda32-eed5-4ea0-b10c-065038dce52d\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.277412 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-utilities" (OuterVolumeSpecName: "utilities") pod "b66bda32-eed5-4ea0-b10c-065038dce52d" (UID: "b66bda32-eed5-4ea0-b10c-065038dce52d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.280129 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.286844 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b66bda32-eed5-4ea0-b10c-065038dce52d-kube-api-access-5wvsk" (OuterVolumeSpecName: "kube-api-access-5wvsk") pod "b66bda32-eed5-4ea0-b10c-065038dce52d" (UID: "b66bda32-eed5-4ea0-b10c-065038dce52d"). InnerVolumeSpecName "kube-api-access-5wvsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.290790 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.306075 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.309030 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.376970 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-catalog-content\") pod \"b66bda32-eed5-4ea0-b10c-065038dce52d\" (UID: \"b66bda32-eed5-4ea0-b10c-065038dce52d\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.377267 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.377280 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wvsk\" (UniqueName: \"kubernetes.io/projected/b66bda32-eed5-4ea0-b10c-065038dce52d-kube-api-access-5wvsk\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.425297 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b66bda32-eed5-4ea0-b10c-065038dce52d" (UID: "b66bda32-eed5-4ea0-b10c-065038dce52d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.468848 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-glqlm"] Nov 25 09:40:07 crc kubenswrapper[4854]: W1125 09:40:07.471604 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6450a14_7c2a_4c3f_8861_876374e8cb9a.slice/crio-b81d0e95110685e43048f68bae3322c0af48a9aee8d654c6ec88c9242692fb6c WatchSource:0}: Error finding container b81d0e95110685e43048f68bae3322c0af48a9aee8d654c6ec88c9242692fb6c: Status 404 returned error can't find the container with id b81d0e95110685e43048f68bae3322c0af48a9aee8d654c6ec88c9242692fb6c Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.477767 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-utilities\") pod \"0e545002-87d9-40ff-bf70-684e7b89f8f1\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.477846 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9zrs\" (UniqueName: \"kubernetes.io/projected/91299a1e-6ae1-44b2-ade3-912ba1568cfb-kube-api-access-n9zrs\") pod \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.477876 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-operator-metrics\") pod \"9117eb3b-0187-4957-b851-da9e4c229c8f\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.478372 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-utilities\") pod \"621fc295-6eae-4091-950e-c883d64bf7b8\" (UID: 
\"621fc295-6eae-4091-950e-c883d64bf7b8\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.478461 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-catalog-content\") pod \"621fc295-6eae-4091-950e-c883d64bf7b8\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.478547 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-utilities" (OuterVolumeSpecName: "utilities") pod "0e545002-87d9-40ff-bf70-684e7b89f8f1" (UID: "0e545002-87d9-40ff-bf70-684e7b89f8f1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.478787 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-utilities" (OuterVolumeSpecName: "utilities") pod "621fc295-6eae-4091-950e-c883d64bf7b8" (UID: "621fc295-6eae-4091-950e-c883d64bf7b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.479711 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqc2t\" (UniqueName: \"kubernetes.io/projected/0e545002-87d9-40ff-bf70-684e7b89f8f1-kube-api-access-lqc2t\") pod \"0e545002-87d9-40ff-bf70-684e7b89f8f1\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.479775 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5ml9\" (UniqueName: \"kubernetes.io/projected/621fc295-6eae-4091-950e-c883d64bf7b8-kube-api-access-g5ml9\") pod \"621fc295-6eae-4091-950e-c883d64bf7b8\" (UID: \"621fc295-6eae-4091-950e-c883d64bf7b8\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.479821 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-trusted-ca\") pod \"9117eb3b-0187-4957-b851-da9e4c229c8f\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.479855 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-catalog-content\") pod \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.479892 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-utilities\") pod \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\" (UID: \"91299a1e-6ae1-44b2-ade3-912ba1568cfb\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.479919 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnglk\" (UniqueName: \"kubernetes.io/projected/9117eb3b-0187-4957-b851-da9e4c229c8f-kube-api-access-wnglk\") pod \"9117eb3b-0187-4957-b851-da9e4c229c8f\" (UID: \"9117eb3b-0187-4957-b851-da9e4c229c8f\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.479944 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-catalog-content\") pod \"0e545002-87d9-40ff-bf70-684e7b89f8f1\" (UID: \"0e545002-87d9-40ff-bf70-684e7b89f8f1\") " Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.480251 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.480266 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b66bda32-eed5-4ea0-b10c-065038dce52d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.480275 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.480839 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91299a1e-6ae1-44b2-ade3-912ba1568cfb-kube-api-access-n9zrs" (OuterVolumeSpecName: "kube-api-access-n9zrs") pod "91299a1e-6ae1-44b2-ade3-912ba1568cfb" (UID: "91299a1e-6ae1-44b2-ade3-912ba1568cfb"). InnerVolumeSpecName "kube-api-access-n9zrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.480949 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-utilities" (OuterVolumeSpecName: "utilities") pod "91299a1e-6ae1-44b2-ade3-912ba1568cfb" (UID: "91299a1e-6ae1-44b2-ade3-912ba1568cfb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.481417 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e545002-87d9-40ff-bf70-684e7b89f8f1-kube-api-access-lqc2t" (OuterVolumeSpecName: "kube-api-access-lqc2t") pod "0e545002-87d9-40ff-bf70-684e7b89f8f1" (UID: "0e545002-87d9-40ff-bf70-684e7b89f8f1"). InnerVolumeSpecName "kube-api-access-lqc2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.481632 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "9117eb3b-0187-4957-b851-da9e4c229c8f" (UID: "9117eb3b-0187-4957-b851-da9e4c229c8f"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.487459 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "9117eb3b-0187-4957-b851-da9e4c229c8f" (UID: "9117eb3b-0187-4957-b851-da9e4c229c8f"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.488170 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9117eb3b-0187-4957-b851-da9e4c229c8f-kube-api-access-wnglk" (OuterVolumeSpecName: "kube-api-access-wnglk") pod "9117eb3b-0187-4957-b851-da9e4c229c8f" (UID: "9117eb3b-0187-4957-b851-da9e4c229c8f"). InnerVolumeSpecName "kube-api-access-wnglk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.495976 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/621fc295-6eae-4091-950e-c883d64bf7b8-kube-api-access-g5ml9" (OuterVolumeSpecName: "kube-api-access-g5ml9") pod "621fc295-6eae-4091-950e-c883d64bf7b8" (UID: "621fc295-6eae-4091-950e-c883d64bf7b8"). InnerVolumeSpecName "kube-api-access-g5ml9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.499813 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e545002-87d9-40ff-bf70-684e7b89f8f1" (UID: "0e545002-87d9-40ff-bf70-684e7b89f8f1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.531626 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "621fc295-6eae-4091-950e-c883d64bf7b8" (UID: "621fc295-6eae-4091-950e-c883d64bf7b8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580647 4854 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580949 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580958 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnglk\" (UniqueName: \"kubernetes.io/projected/9117eb3b-0187-4957-b851-da9e4c229c8f-kube-api-access-wnglk\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580966 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e545002-87d9-40ff-bf70-684e7b89f8f1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580975 4854 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9117eb3b-0187-4957-b851-da9e4c229c8f-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580983 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9zrs\" (UniqueName: \"kubernetes.io/projected/91299a1e-6ae1-44b2-ade3-912ba1568cfb-kube-api-access-n9zrs\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580991 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqc2t\" (UniqueName: \"kubernetes.io/projected/0e545002-87d9-40ff-bf70-684e7b89f8f1-kube-api-access-lqc2t\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.580999 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/621fc295-6eae-4091-950e-c883d64bf7b8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.581007 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5ml9\" (UniqueName: \"kubernetes.io/projected/621fc295-6eae-4091-950e-c883d64bf7b8-kube-api-access-g5ml9\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.590765 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91299a1e-6ae1-44b2-ade3-912ba1568cfb" (UID: "91299a1e-6ae1-44b2-ade3-912ba1568cfb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:07 crc kubenswrapper[4854]: I1125 09:40:07.681523 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91299a1e-6ae1-44b2-ade3-912ba1568cfb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.106361 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8f2r9" event={"ID":"0e545002-87d9-40ff-bf70-684e7b89f8f1","Type":"ContainerDied","Data":"df72a091e6f7e25a61011e34efcf9f4194ab460147da118daee606b9ad38a936"} Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.106458 4854 scope.go:117] "RemoveContainer" containerID="7299c0b17f8e0eff3480a90090e6f01be87a8996e09cc854c75e7620b66678f2" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.107796 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8f2r9" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.110625 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tdb4p" event={"ID":"91299a1e-6ae1-44b2-ade3-912ba1568cfb","Type":"ContainerDied","Data":"90d7db9b1e4ddcb38cf29bef6d4659636daf50e18b07831cf64354ee974738d5"} Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.110700 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tdb4p" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.113716 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64qr8" event={"ID":"b66bda32-eed5-4ea0-b10c-065038dce52d","Type":"ContainerDied","Data":"fbb5a809844fe20e8f03fed27fc5e0a69d9554ff63cf16e062348cc506a638e2"} Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.113820 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-64qr8" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.116321 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" event={"ID":"a6450a14-7c2a-4c3f-8861-876374e8cb9a","Type":"ContainerStarted","Data":"e7355e618199767b3505c4c73e6938ef1d782dc3833e262d6a17ee1cf142b945"} Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.116370 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" event={"ID":"a6450a14-7c2a-4c3f-8861-876374e8cb9a","Type":"ContainerStarted","Data":"b81d0e95110685e43048f68bae3322c0af48a9aee8d654c6ec88c9242692fb6c"} Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.116829 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.117816 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.117820 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mz9k6" event={"ID":"9117eb3b-0187-4957-b851-da9e4c229c8f","Type":"ContainerDied","Data":"c322352cd33e8cee1967aa81d92ed36962cac897f0592cba5f837fe4beb13f99"} Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.120263 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.122557 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zznfv" event={"ID":"621fc295-6eae-4091-950e-c883d64bf7b8","Type":"ContainerDied","Data":"d3dd3d5e8e1e01589fa860b9d0d46d9d8926ead1fbbe56b9bb5b173e297c3542"} Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.122570 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zznfv" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.130705 4854 scope.go:117] "RemoveContainer" containerID="ac8d56469bad5d7fe4e331d0a7d9a30c219ffa7ac85b193d4f7fba92d3b85fd3" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.140178 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-glqlm" podStartSLOduration=2.140158392 podStartE2EDuration="2.140158392s" podCreationTimestamp="2025-11-25 09:40:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:40:08.134844867 +0000 UTC m=+213.987838233" watchObservedRunningTime="2025-11-25 09:40:08.140158392 +0000 UTC m=+213.993151778" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.158707 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8f2r9"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.163734 4854 scope.go:117] "RemoveContainer" containerID="f8aa75e535305910f352e0ad128c403df4369b7a6f79bdfc42a4b66f970d6547" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.169918 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8f2r9"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.176280 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-64qr8"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.197477 4854 scope.go:117] "RemoveContainer" containerID="c2aab0c89795a8bdfc8f68ad97648efcec063ecddba5f253cc2842b5261fd717" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.207393 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-64qr8"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.213222 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tdb4p"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.220282 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tdb4p"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.226608 4854 scope.go:117] "RemoveContainer" containerID="4408381e59976b55ecd855722fb0c3f83cad6292a641ee0aa28cd7be1876e67c" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.227950 4854 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mz9k6"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.230801 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mz9k6"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.233138 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zznfv"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.235758 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zznfv"] Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.241646 4854 scope.go:117] "RemoveContainer" containerID="7772755a3bee8624b514939238310cb34e3eaf41ea96f4c1e996501d4c877762" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.258046 4854 scope.go:117] "RemoveContainer" containerID="850d06e0bede6373eb00a6690de9f278ccb22a4ee61c555521331d76ee61eaa5" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.273025 4854 scope.go:117] "RemoveContainer" containerID="34c18f897940fee0125e2e2da32f9d306d785d62c9a7d595537c21917a6b50ee" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.286067 4854 scope.go:117] "RemoveContainer" containerID="4f2a0f5b5eb6f111c16b3cf73a4306e6763b608263b1a9a10ed59d9ec27ada60" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.299979 4854 scope.go:117] "RemoveContainer" containerID="7c5ab223884486e83e8b38ca08cd3fd9f322a85ea0a85aa06a26c4a5c089a915" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.312164 4854 scope.go:117] "RemoveContainer" containerID="0b266a7c7a8f3f9c4fcfa6fb91d96e904f36fb12723ea8b7179aa292e7238eb0" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.322977 4854 scope.go:117] "RemoveContainer" containerID="c336667d04e1d4b289090caec8580055312b8949a763deb4590ebf4cfca831ec" Nov 25 09:40:08 crc kubenswrapper[4854]: I1125 09:40:08.335463 4854 scope.go:117] "RemoveContainer" containerID="81793df50c17ade2916ec80918d9f94906ab2a5028c2b2dc972eb54e88b88f2f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010152 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mzqj2"] Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010653 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010685 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010696 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerName="marketplace-operator" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010702 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerName="marketplace-operator" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010711 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010717 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010723 4854 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010729 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010737 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010742 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010752 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010757 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010765 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010771 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="extract-content" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010780 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010786 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010794 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010800 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010808 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010813 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010823 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010829 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010836 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010841 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: E1125 09:40:09.010847 4854 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010853 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="extract-utilities" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010927 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" containerName="marketplace-operator" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010935 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010946 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010956 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.010963 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" containerName="registry-server" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.011777 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.014298 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.053651 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e545002-87d9-40ff-bf70-684e7b89f8f1" path="/var/lib/kubelet/pods/0e545002-87d9-40ff-bf70-684e7b89f8f1/volumes" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.062464 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="621fc295-6eae-4091-950e-c883d64bf7b8" path="/var/lib/kubelet/pods/621fc295-6eae-4091-950e-c883d64bf7b8/volumes" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.063210 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9117eb3b-0187-4957-b851-da9e4c229c8f" path="/var/lib/kubelet/pods/9117eb3b-0187-4957-b851-da9e4c229c8f/volumes" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.063743 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91299a1e-6ae1-44b2-ade3-912ba1568cfb" path="/var/lib/kubelet/pods/91299a1e-6ae1-44b2-ade3-912ba1568cfb/volumes" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.064461 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b66bda32-eed5-4ea0-b10c-065038dce52d" path="/var/lib/kubelet/pods/b66bda32-eed5-4ea0-b10c-065038dce52d/volumes" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.065056 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzqj2"] Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.212604 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzqsz\" (UniqueName: \"kubernetes.io/projected/8ee67274-c034-4307-a53e-2655baa2d521-kube-api-access-kzqsz\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " 
pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.212660 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee67274-c034-4307-a53e-2655baa2d521-catalog-content\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.212712 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee67274-c034-4307-a53e-2655baa2d521-utilities\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.213378 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9r85f"] Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.214501 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.217827 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.221786 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9r85f"] Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.313434 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzqsz\" (UniqueName: \"kubernetes.io/projected/8ee67274-c034-4307-a53e-2655baa2d521-kube-api-access-kzqsz\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.313505 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee67274-c034-4307-a53e-2655baa2d521-catalog-content\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.313536 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-catalog-content\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.313557 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee67274-c034-4307-a53e-2655baa2d521-utilities\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.313585 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nflt4\" (UniqueName: \"kubernetes.io/projected/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-kube-api-access-nflt4\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " 
pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.313658 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-utilities\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.314198 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ee67274-c034-4307-a53e-2655baa2d521-utilities\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.314234 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ee67274-c034-4307-a53e-2655baa2d521-catalog-content\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.335166 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzqsz\" (UniqueName: \"kubernetes.io/projected/8ee67274-c034-4307-a53e-2655baa2d521-kube-api-access-kzqsz\") pod \"redhat-marketplace-mzqj2\" (UID: \"8ee67274-c034-4307-a53e-2655baa2d521\") " pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.364833 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.414869 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-utilities\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.414945 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-catalog-content\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.414973 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nflt4\" (UniqueName: \"kubernetes.io/projected/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-kube-api-access-nflt4\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.415353 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-utilities\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.415413 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-catalog-content\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.433440 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nflt4\" (UniqueName: \"kubernetes.io/projected/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-kube-api-access-nflt4\") pod \"redhat-operators-9r85f\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") " pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.538194 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.539114 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzqj2"] Nov 25 09:40:09 crc kubenswrapper[4854]: W1125 09:40:09.549751 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ee67274_c034_4307_a53e_2655baa2d521.slice/crio-c70e1e4d87071f9c6ac0d7eac2b6ab98607b05b92c1b8e6360bdad8672846af9 WatchSource:0}: Error finding container c70e1e4d87071f9c6ac0d7eac2b6ab98607b05b92c1b8e6360bdad8672846af9: Status 404 returned error can't find the container with id c70e1e4d87071f9c6ac0d7eac2b6ab98607b05b92c1b8e6360bdad8672846af9 Nov 25 09:40:09 crc kubenswrapper[4854]: I1125 09:40:09.706496 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9r85f"] Nov 25 09:40:10 crc kubenswrapper[4854]: I1125 09:40:10.140529 4854 generic.go:334] "Generic (PLEG): container finished" podID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerID="db3ebebd0f6f6b180664552fabf99e0fe5a037163aca04ecf4d1f59b43b9f038" exitCode=0 Nov 25 09:40:10 crc kubenswrapper[4854]: I1125 09:40:10.140660 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9r85f" event={"ID":"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28","Type":"ContainerDied","Data":"db3ebebd0f6f6b180664552fabf99e0fe5a037163aca04ecf4d1f59b43b9f038"} Nov 25 09:40:10 crc kubenswrapper[4854]: I1125 09:40:10.140928 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9r85f" event={"ID":"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28","Type":"ContainerStarted","Data":"745fc28b60c85df52e5789e2545286a0324c880ee6ececc18bbab9688f7fc8ca"} Nov 25 09:40:10 crc kubenswrapper[4854]: I1125 09:40:10.145971 4854 generic.go:334] "Generic (PLEG): container finished" podID="8ee67274-c034-4307-a53e-2655baa2d521" containerID="e52a6785d08d904087463e3a6b888b9a174ebf113df0a704c98a08acbb6c1624" exitCode=0 Nov 25 09:40:10 crc kubenswrapper[4854]: I1125 09:40:10.146084 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzqj2" event={"ID":"8ee67274-c034-4307-a53e-2655baa2d521","Type":"ContainerDied","Data":"e52a6785d08d904087463e3a6b888b9a174ebf113df0a704c98a08acbb6c1624"} Nov 25 09:40:10 crc kubenswrapper[4854]: I1125 09:40:10.146140 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzqj2" event={"ID":"8ee67274-c034-4307-a53e-2655baa2d521","Type":"ContainerStarted","Data":"c70e1e4d87071f9c6ac0d7eac2b6ab98607b05b92c1b8e6360bdad8672846af9"} Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.416836 4854 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5kx2t"] Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.418144 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.423663 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.425464 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5kx2t"] Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.576366 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-utilities\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.576455 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z47q\" (UniqueName: \"kubernetes.io/projected/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-kube-api-access-5z47q\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.576473 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-catalog-content\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.613452 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-h7fm2"] Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.614712 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.617215 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.624528 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h7fm2"] Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.677403 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-utilities\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.677538 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z47q\" (UniqueName: \"kubernetes.io/projected/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-kube-api-access-5z47q\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.677644 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-catalog-content\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.677896 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-utilities\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.677985 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-catalog-content\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.698745 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z47q\" (UniqueName: \"kubernetes.io/projected/260fd413-a6c2-4ae1-9dfa-d3bceccfd722-kube-api-access-5z47q\") pod \"community-operators-5kx2t\" (UID: \"260fd413-a6c2-4ae1-9dfa-d3bceccfd722\") " pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.747057 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.779257 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-utilities\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.779296 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-catalog-content\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.779392 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpjtf\" (UniqueName: \"kubernetes.io/projected/30234f04-abcc-479e-96fc-40bfbce02d59-kube-api-access-tpjtf\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.881133 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpjtf\" (UniqueName: \"kubernetes.io/projected/30234f04-abcc-479e-96fc-40bfbce02d59-kube-api-access-tpjtf\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.881478 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-utilities\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.881501 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-catalog-content\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.881963 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-utilities\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.882032 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-catalog-content\") pod \"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.899645 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpjtf\" (UniqueName: \"kubernetes.io/projected/30234f04-abcc-479e-96fc-40bfbce02d59-kube-api-access-tpjtf\") pod 
\"certified-operators-h7fm2\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.936823 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:11 crc kubenswrapper[4854]: I1125 09:40:11.945783 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5kx2t"] Nov 25 09:40:11 crc kubenswrapper[4854]: W1125 09:40:11.959091 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod260fd413_a6c2_4ae1_9dfa_d3bceccfd722.slice/crio-90e60ebee7ac6561c99e5295f4d330d4d83d9ac15a45b713bda35e3cc00b0374 WatchSource:0}: Error finding container 90e60ebee7ac6561c99e5295f4d330d4d83d9ac15a45b713bda35e3cc00b0374: Status 404 returned error can't find the container with id 90e60ebee7ac6561c99e5295f4d330d4d83d9ac15a45b713bda35e3cc00b0374 Nov 25 09:40:12 crc kubenswrapper[4854]: I1125 09:40:12.161873 4854 generic.go:334] "Generic (PLEG): container finished" podID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerID="572d5ed33214fd568416f6e7ada1d9f381717afb53fb76049eeeec5249e8076f" exitCode=0 Nov 25 09:40:12 crc kubenswrapper[4854]: I1125 09:40:12.162013 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9r85f" event={"ID":"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28","Type":"ContainerDied","Data":"572d5ed33214fd568416f6e7ada1d9f381717afb53fb76049eeeec5249e8076f"} Nov 25 09:40:12 crc kubenswrapper[4854]: I1125 09:40:12.164088 4854 generic.go:334] "Generic (PLEG): container finished" podID="8ee67274-c034-4307-a53e-2655baa2d521" containerID="5e1172e8c464639167c69b9c6d35e77caa394ae42579f3a0ed67e6b025618c72" exitCode=0 Nov 25 09:40:12 crc kubenswrapper[4854]: I1125 09:40:12.164133 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzqj2" event={"ID":"8ee67274-c034-4307-a53e-2655baa2d521","Type":"ContainerDied","Data":"5e1172e8c464639167c69b9c6d35e77caa394ae42579f3a0ed67e6b025618c72"} Nov 25 09:40:12 crc kubenswrapper[4854]: I1125 09:40:12.166463 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kx2t" event={"ID":"260fd413-a6c2-4ae1-9dfa-d3bceccfd722","Type":"ContainerStarted","Data":"34ca2b3e244f46adc52afa7cd811c109665ffafeac1eb640d76e214050c824cc"} Nov 25 09:40:12 crc kubenswrapper[4854]: I1125 09:40:12.166507 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kx2t" event={"ID":"260fd413-a6c2-4ae1-9dfa-d3bceccfd722","Type":"ContainerStarted","Data":"90e60ebee7ac6561c99e5295f4d330d4d83d9ac15a45b713bda35e3cc00b0374"} Nov 25 09:40:12 crc kubenswrapper[4854]: I1125 09:40:12.327714 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h7fm2"] Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.175555 4854 generic.go:334] "Generic (PLEG): container finished" podID="30234f04-abcc-479e-96fc-40bfbce02d59" containerID="b4159bd3660426871f520f3a3b0003b54a5ca9e468df399068ebf9d78a716c32" exitCode=0 Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.175630 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fm2" 
event={"ID":"30234f04-abcc-479e-96fc-40bfbce02d59","Type":"ContainerDied","Data":"b4159bd3660426871f520f3a3b0003b54a5ca9e468df399068ebf9d78a716c32"} Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.175866 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fm2" event={"ID":"30234f04-abcc-479e-96fc-40bfbce02d59","Type":"ContainerStarted","Data":"aa2297ee2919c39f66f454aa0576fee718bab02804a4038e7590d1016751c0f4"} Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.178231 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9r85f" event={"ID":"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28","Type":"ContainerStarted","Data":"7edd164f4e8e5391f94c8e969c1ee496c91b710f7839537662ed5b0c611bfda4"} Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.180144 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzqj2" event={"ID":"8ee67274-c034-4307-a53e-2655baa2d521","Type":"ContainerStarted","Data":"89e36a1f8b27c4ef82eac580648dd9cbdc41ee2df30306f7e56842a49d052046"} Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.184367 4854 generic.go:334] "Generic (PLEG): container finished" podID="260fd413-a6c2-4ae1-9dfa-d3bceccfd722" containerID="34ca2b3e244f46adc52afa7cd811c109665ffafeac1eb640d76e214050c824cc" exitCode=0 Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.184415 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kx2t" event={"ID":"260fd413-a6c2-4ae1-9dfa-d3bceccfd722","Type":"ContainerDied","Data":"34ca2b3e244f46adc52afa7cd811c109665ffafeac1eb640d76e214050c824cc"} Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.215376 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mzqj2" podStartSLOduration=2.783432694 podStartE2EDuration="5.215352475s" podCreationTimestamp="2025-11-25 09:40:08 +0000 UTC" firstStartedPulling="2025-11-25 09:40:10.147763265 +0000 UTC m=+216.000756631" lastFinishedPulling="2025-11-25 09:40:12.579683026 +0000 UTC m=+218.432676412" observedRunningTime="2025-11-25 09:40:13.211210726 +0000 UTC m=+219.064204122" watchObservedRunningTime="2025-11-25 09:40:13.215352475 +0000 UTC m=+219.068345851" Nov 25 09:40:13 crc kubenswrapper[4854]: I1125 09:40:13.240936 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9r85f" podStartSLOduration=1.666026938 podStartE2EDuration="4.240918472s" podCreationTimestamp="2025-11-25 09:40:09 +0000 UTC" firstStartedPulling="2025-11-25 09:40:10.142045127 +0000 UTC m=+215.995038493" lastFinishedPulling="2025-11-25 09:40:12.716936651 +0000 UTC m=+218.569930027" observedRunningTime="2025-11-25 09:40:13.240332203 +0000 UTC m=+219.093325579" watchObservedRunningTime="2025-11-25 09:40:13.240918472 +0000 UTC m=+219.093911848" Nov 25 09:40:15 crc kubenswrapper[4854]: I1125 09:40:15.194239 4854 generic.go:334] "Generic (PLEG): container finished" podID="260fd413-a6c2-4ae1-9dfa-d3bceccfd722" containerID="1309eacff05c6e964331f21d0bf1e7a9993f0a8082645388f44c52eeea7e1e74" exitCode=0 Nov 25 09:40:15 crc kubenswrapper[4854]: I1125 09:40:15.194462 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kx2t" event={"ID":"260fd413-a6c2-4ae1-9dfa-d3bceccfd722","Type":"ContainerDied","Data":"1309eacff05c6e964331f21d0bf1e7a9993f0a8082645388f44c52eeea7e1e74"} Nov 25 09:40:15 crc 
kubenswrapper[4854]: I1125 09:40:15.196352 4854 generic.go:334] "Generic (PLEG): container finished" podID="30234f04-abcc-479e-96fc-40bfbce02d59" containerID="8a06f252c6a2388f35ea353d5b524a277504048db22c21fa364fceafc373b4ac" exitCode=0 Nov 25 09:40:15 crc kubenswrapper[4854]: I1125 09:40:15.196479 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fm2" event={"ID":"30234f04-abcc-479e-96fc-40bfbce02d59","Type":"ContainerDied","Data":"8a06f252c6a2388f35ea353d5b524a277504048db22c21fa364fceafc373b4ac"} Nov 25 09:40:16 crc kubenswrapper[4854]: I1125 09:40:16.203255 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kx2t" event={"ID":"260fd413-a6c2-4ae1-9dfa-d3bceccfd722","Type":"ContainerStarted","Data":"921ff457939de653c624f0012aeae57ce370e0d99505c2b1683a66f626d77c18"} Nov 25 09:40:16 crc kubenswrapper[4854]: I1125 09:40:16.206526 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fm2" event={"ID":"30234f04-abcc-479e-96fc-40bfbce02d59","Type":"ContainerStarted","Data":"fc49268dc00cf45aebd2f7dfe2d2495219a60d5a3cfbf130b475527ad352e081"} Nov 25 09:40:16 crc kubenswrapper[4854]: I1125 09:40:16.222182 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5kx2t" podStartSLOduration=2.679584895 podStartE2EDuration="5.222165722s" podCreationTimestamp="2025-11-25 09:40:11 +0000 UTC" firstStartedPulling="2025-11-25 09:40:13.185838436 +0000 UTC m=+219.038831812" lastFinishedPulling="2025-11-25 09:40:15.728419263 +0000 UTC m=+221.581412639" observedRunningTime="2025-11-25 09:40:16.219610973 +0000 UTC m=+222.072604349" watchObservedRunningTime="2025-11-25 09:40:16.222165722 +0000 UTC m=+222.075159098" Nov 25 09:40:16 crc kubenswrapper[4854]: I1125 09:40:16.240991 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-h7fm2" podStartSLOduration=2.459594262 podStartE2EDuration="5.240975078s" podCreationTimestamp="2025-11-25 09:40:11 +0000 UTC" firstStartedPulling="2025-11-25 09:40:13.176793384 +0000 UTC m=+219.029786760" lastFinishedPulling="2025-11-25 09:40:15.95817421 +0000 UTC m=+221.811167576" observedRunningTime="2025-11-25 09:40:16.238431799 +0000 UTC m=+222.091425165" watchObservedRunningTime="2025-11-25 09:40:16.240975078 +0000 UTC m=+222.093968454" Nov 25 09:40:19 crc kubenswrapper[4854]: I1125 09:40:19.365939 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:19 crc kubenswrapper[4854]: I1125 09:40:19.366484 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:19 crc kubenswrapper[4854]: I1125 09:40:19.413306 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:19 crc kubenswrapper[4854]: I1125 09:40:19.539597 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:19 crc kubenswrapper[4854]: I1125 09:40:19.539647 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:19 crc kubenswrapper[4854]: I1125 09:40:19.592580 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:20 crc kubenswrapper[4854]: I1125 09:40:20.264202 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9r85f" Nov 25 09:40:20 crc kubenswrapper[4854]: I1125 09:40:20.271323 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mzqj2" Nov 25 09:40:21 crc kubenswrapper[4854]: I1125 09:40:21.747823 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:21 crc kubenswrapper[4854]: I1125 09:40:21.747920 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:21 crc kubenswrapper[4854]: I1125 09:40:21.796594 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:21 crc kubenswrapper[4854]: I1125 09:40:21.946220 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:21 crc kubenswrapper[4854]: I1125 09:40:21.946286 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:21 crc kubenswrapper[4854]: I1125 09:40:21.985279 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:22 crc kubenswrapper[4854]: I1125 09:40:22.279175 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 09:40:22 crc kubenswrapper[4854]: I1125 09:40:22.279295 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5kx2t" Nov 25 09:40:25 crc kubenswrapper[4854]: I1125 09:40:25.029725 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:40:25 crc kubenswrapper[4854]: I1125 09:40:25.030151 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:40:25 crc kubenswrapper[4854]: I1125 09:40:25.030204 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:40:25 crc kubenswrapper[4854]: I1125 09:40:25.030881 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:40:25 crc kubenswrapper[4854]: I1125 09:40:25.030933 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" 
podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9" gracePeriod=600 Nov 25 09:40:26 crc kubenswrapper[4854]: I1125 09:40:26.262422 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9" exitCode=0 Nov 25 09:40:26 crc kubenswrapper[4854]: I1125 09:40:26.262507 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9"} Nov 25 09:40:27 crc kubenswrapper[4854]: I1125 09:40:27.270222 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"5e4500c83ec36e72ad24d0c5585cfb6c0ce2eeda56eaac68cb46d27b8d832338"} Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.613047 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd"] Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.615590 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.618803 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.619075 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.619254 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.619384 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.619519 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.625340 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd"] Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.741877 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.741930 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vs4w\" (UniqueName: \"kubernetes.io/projected/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-kube-api-access-6vs4w\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 
crc kubenswrapper[4854]: I1125 09:40:37.742000 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.843322 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.843393 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vs4w\" (UniqueName: \"kubernetes.io/projected/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-kube-api-access-6vs4w\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.843435 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.844400 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.849308 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.865403 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vs4w\" (UniqueName: \"kubernetes.io/projected/c85fc042-858d-4bc2-b3ef-db8c1edfe73e-kube-api-access-6vs4w\") pod \"cluster-monitoring-operator-6d5b84845-2jtkd\" (UID: \"c85fc042-858d-4bc2-b3ef-db8c1edfe73e\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:37 crc kubenswrapper[4854]: I1125 09:40:37.931395 4854 util.go:30] "No sandbox for pod can be found. 
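The liveness failure above shows the kubelet probing http://127.0.0.1:8798/health and restarting machine-config-daemon once the probe fails. A sketch of a corev1.Probe shaped like that endpoint; only the host, port, and path come from the log, and the timing fields are assumptions (the log does not record them):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Sketch: a liveness probe matching the endpoint failing above. PeriodSeconds
// and FailureThreshold are assumed values, not taken from the log.
func main() {
	probe := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1",
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds:    10, // assumption
		FailureThreshold: 3,  // assumption
	}
	fmt.Printf("%+v\n", probe)
}
```

Note that the kill record above uses gracePeriod=600: on a liveness failure the kubelet sends the container a termination signal and allows up to the grace period before forcing the kill, which is why ContainerDied only appears at 09:40:26 (the process exited promptly, exitCode=0).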
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" Nov 25 09:40:38 crc kubenswrapper[4854]: I1125 09:40:38.121697 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd"] Nov 25 09:40:38 crc kubenswrapper[4854]: I1125 09:40:38.337205 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" event={"ID":"c85fc042-858d-4bc2-b3ef-db8c1edfe73e","Type":"ContainerStarted","Data":"01f5e5549e7e85a223075b19a1009167ef8dbcc61c92df1a9a1c6b5df7e5aa89"} Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.349284 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" event={"ID":"c85fc042-858d-4bc2-b3ef-db8c1edfe73e","Type":"ContainerStarted","Data":"5db8d4b44c9982db9709e97210f9544c76490f4c298b5efb9c00796bbf1b5aa2"} Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.369272 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-2jtkd" podStartSLOduration=1.411494969 podStartE2EDuration="3.369233119s" podCreationTimestamp="2025-11-25 09:40:37 +0000 UTC" firstStartedPulling="2025-11-25 09:40:38.128959769 +0000 UTC m=+243.981953145" lastFinishedPulling="2025-11-25 09:40:40.086697919 +0000 UTC m=+245.939691295" observedRunningTime="2025-11-25 09:40:40.363371886 +0000 UTC m=+246.216365272" watchObservedRunningTime="2025-11-25 09:40:40.369233119 +0000 UTC m=+246.222226505" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.580195 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-p64dl"] Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.580955 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.595340 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-p64dl"] Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.693630 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp"] Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.694397 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.696979 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-kmrnp" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.697615 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.703525 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp"] Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783315 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783371 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktj9l\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-kube-api-access-ktj9l\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783398 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783501 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-registry-tls\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783541 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-bound-sa-token\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783580 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-registry-certificates\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783603 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-trusted-ca\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.783639 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.806359 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885269 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-registry-tls\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885322 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-bound-sa-token\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885356 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/a448acc0-ef0a-45fc-b3db-67fdc5ff24c4-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-pbrtp\" (UID: \"a448acc0-ef0a-45fc-b3db-67fdc5ff24c4\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885386 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-registry-certificates\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885407 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-trusted-ca\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885533 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-p64dl\" (UID: 
\"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885596 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktj9l\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-kube-api-access-ktj9l\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.885720 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.886083 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.886918 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-trusted-ca\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.887282 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-registry-certificates\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.895636 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-registry-tls\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.897329 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.904505 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktj9l\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-kube-api-access-ktj9l\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.911897 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d980202-a47f-4bde-8b97-d76a9b6a0ee6-bound-sa-token\") pod \"image-registry-66df7c8f76-p64dl\" (UID: \"6d980202-a47f-4bde-8b97-d76a9b6a0ee6\") " pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.987374 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/a448acc0-ef0a-45fc-b3db-67fdc5ff24c4-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-pbrtp\" (UID: \"a448acc0-ef0a-45fc-b3db-67fdc5ff24c4\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" Nov 25 09:40:40 crc kubenswrapper[4854]: I1125 09:40:40.990136 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/a448acc0-ef0a-45fc-b3db-67fdc5ff24c4-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-pbrtp\" (UID: \"a448acc0-ef0a-45fc-b3db-67fdc5ff24c4\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" Nov 25 09:40:41 crc kubenswrapper[4854]: I1125 09:40:41.007099 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" Nov 25 09:40:41 crc kubenswrapper[4854]: I1125 09:40:41.193623 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp"] Nov 25 09:40:41 crc kubenswrapper[4854]: I1125 09:40:41.195462 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:41 crc kubenswrapper[4854]: W1125 09:40:41.197810 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda448acc0_ef0a_45fc_b3db_67fdc5ff24c4.slice/crio-0e9362efe4b97f8bd482f1eb4bef1f4d7e326b66b7dc391e4c43385b361823b8 WatchSource:0}: Error finding container 0e9362efe4b97f8bd482f1eb4bef1f4d7e326b66b7dc391e4c43385b361823b8: Status 404 returned error can't find the container with id 0e9362efe4b97f8bd482f1eb4bef1f4d7e326b66b7dc391e4c43385b361823b8 Nov 25 09:40:41 crc kubenswrapper[4854]: I1125 09:40:41.355470 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" event={"ID":"a448acc0-ef0a-45fc-b3db-67fdc5ff24c4","Type":"ContainerStarted","Data":"0e9362efe4b97f8bd482f1eb4bef1f4d7e326b66b7dc391e4c43385b361823b8"} Nov 25 09:40:41 crc kubenswrapper[4854]: I1125 09:40:41.395902 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-p64dl"] Nov 25 09:40:41 crc kubenswrapper[4854]: W1125 09:40:41.402198 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d980202_a47f_4bde_8b97_d76a9b6a0ee6.slice/crio-c57831583d4da9e500ec585bd0b1a0c2e2824b132cea85afe6f248380c6e9997 WatchSource:0}: Error finding container c57831583d4da9e500ec585bd0b1a0c2e2824b132cea85afe6f248380c6e9997: Status 404 returned error can't find the container with id c57831583d4da9e500ec585bd0b1a0c2e2824b132cea85afe6f248380c6e9997 Nov 25 09:40:42 crc kubenswrapper[4854]: I1125 09:40:42.361611 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" event={"ID":"6d980202-a47f-4bde-8b97-d76a9b6a0ee6","Type":"ContainerStarted","Data":"b5bcd0159f226d9b50f73d9b2cf6d171f17f79344f61e37c327ee48cd6f20d77"} Nov 25 09:40:42 crc kubenswrapper[4854]: I1125 09:40:42.362039 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" event={"ID":"6d980202-a47f-4bde-8b97-d76a9b6a0ee6","Type":"ContainerStarted","Data":"c57831583d4da9e500ec585bd0b1a0c2e2824b132cea85afe6f248380c6e9997"} Nov 25 09:40:42 crc kubenswrapper[4854]: I1125 09:40:42.362058 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:40:42 crc kubenswrapper[4854]: I1125 09:40:42.380025 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" podStartSLOduration=2.380008651 podStartE2EDuration="2.380008651s" podCreationTimestamp="2025-11-25 09:40:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:40:42.378979489 +0000 UTC m=+248.231972885" watchObservedRunningTime="2025-11-25 09:40:42.380008651 +0000 UTC m=+248.233002027" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.368466 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" event={"ID":"a448acc0-ef0a-45fc-b3db-67fdc5ff24c4","Type":"ContainerStarted","Data":"8b6284572c4236759701c23a8674f6d1f46ef7f625a9838c809817cdb9db27c1"} Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.368838 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.375382 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.385049 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" podStartSLOduration=1.409613026 podStartE2EDuration="3.385027026s" podCreationTimestamp="2025-11-25 09:40:40 +0000 UTC" firstStartedPulling="2025-11-25 09:40:41.203742713 +0000 UTC m=+247.056736079" lastFinishedPulling="2025-11-25 09:40:43.179156703 +0000 UTC m=+249.032150079" observedRunningTime="2025-11-25 09:40:43.384780108 +0000 UTC m=+249.237773494" watchObservedRunningTime="2025-11-25 09:40:43.385027026 +0000 UTC m=+249.238020402" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.741336 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-tl774"] Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.742452 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.744400 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-rkdk7" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.744570 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.745147 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.745582 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.756944 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-tl774"] Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.929641 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df66w\" (UniqueName: \"kubernetes.io/projected/c779be2c-0c36-491f-b788-bb96af7b06ac-kube-api-access-df66w\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.929836 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/c779be2c-0c36-491f-b788-bb96af7b06ac-metrics-client-ca\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.929925 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:43 crc kubenswrapper[4854]: I1125 09:40:43.929947 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.031614 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/c779be2c-0c36-491f-b788-bb96af7b06ac-metrics-client-ca\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.031734 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-tls\") pod 
\"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.031774 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.031856 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df66w\" (UniqueName: \"kubernetes.io/projected/c779be2c-0c36-491f-b788-bb96af7b06ac-kube-api-access-df66w\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.032475 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/c779be2c-0c36-491f-b788-bb96af7b06ac-metrics-client-ca\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" Nov 25 09:40:44 crc kubenswrapper[4854]: E1125 09:40:44.032822 4854 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-tls: secret "prometheus-operator-tls" not found Nov 25 09:40:44 crc kubenswrapper[4854]: E1125 09:40:44.032881 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-tls podName:c779be2c-0c36-491f-b788-bb96af7b06ac nodeName:}" failed. No retries permitted until 2025-11-25 09:40:44.532864945 +0000 UTC m=+250.385858321 (durationBeforeRetry 500ms). 
Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.038333 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774"
Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.066599 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df66w\" (UniqueName: \"kubernetes.io/projected/c779be2c-0c36-491f-b788-bb96af7b06ac-kube-api-access-df66w\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774"
Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.540402 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774"
Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.544415 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/c779be2c-0c36-491f-b788-bb96af7b06ac-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-tl774\" (UID: \"c779be2c-0c36-491f-b788-bb96af7b06ac\") " pod="openshift-monitoring/prometheus-operator-db54df47d-tl774"
Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.656791 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-tl774"
Nov 25 09:40:44 crc kubenswrapper[4854]: I1125 09:40:44.826626 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-tl774"]
Nov 25 09:40:45 crc kubenswrapper[4854]: I1125 09:40:45.381011 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" event={"ID":"c779be2c-0c36-491f-b788-bb96af7b06ac","Type":"ContainerStarted","Data":"1db614b184f1f9e566256ceddb3c47c4016a6b1c40bd6d2e06174c6adb84ca31"}
Nov 25 09:40:47 crc kubenswrapper[4854]: I1125 09:40:47.405223 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" event={"ID":"c779be2c-0c36-491f-b788-bb96af7b06ac","Type":"ContainerStarted","Data":"0d2ba0cf2e812ad2c105c6f7d18cf55df4e62301af85e2109e6c79d7b670080b"}
Nov 25 09:40:48 crc kubenswrapper[4854]: I1125 09:40:48.412708 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" event={"ID":"c779be2c-0c36-491f-b788-bb96af7b06ac","Type":"ContainerStarted","Data":"8d4d1d1e6c6c3fb0eab37b726c5cb958947cb3efea053561f0f20b9095a85fd3"}
Nov 25 09:40:48 crc kubenswrapper[4854]: I1125 09:40:48.426501 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-tl774" podStartSLOduration=3.113809863 podStartE2EDuration="5.426484999s" podCreationTimestamp="2025-11-25 09:40:43 +0000 UTC" firstStartedPulling="2025-11-25 09:40:44.833318268 +0000 UTC m=+250.686311644" lastFinishedPulling="2025-11-25 09:40:47.145993404 +0000 UTC m=+252.998986780" observedRunningTime="2025-11-25 09:40:48.425926291 +0000 UTC m=+254.278919677" watchObservedRunningTime="2025-11-25 09:40:48.426484999 +0000 UTC m=+254.279478375"
Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.133534 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-lgjq6"]
Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.134723 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-lgjq6"
Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.136369 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config"
Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.136813 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls"
Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.137278 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-xw52g"
Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.173498 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw"]
Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.174973 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw"
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.180163 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.180192 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.180219 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.180360 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-jffmb" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.180365 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n"] Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.181945 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.189484 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.189583 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.190439 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-ttgv5" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.193828 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw"] Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.197762 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n"] Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231749 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-sys\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231805 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231838 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-textfile\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231866 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpzwq\" (UniqueName: \"kubernetes.io/projected/454a7eff-fe86-4427-bd18-f1b6becb5fc5-kube-api-access-xpzwq\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231894 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/626a367e-170e-4d9e-ad51-fb6a6f319436-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231922 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231949 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-wtmp\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.231996 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.232019 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-tls\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.232041 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/454a7eff-fe86-4427-bd18-f1b6becb5fc5-metrics-client-ca\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.232073 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/626a367e-170e-4d9e-ad51-fb6a6f319436-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.232102 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
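The reflector.go "Caches populated" lines above correspond to client-go informer caches completing their initial List/Watch for each Secret or ConfigMap the new pods reference. A minimal client-go sketch of starting such an informer and waiting for its cache to sync; how the clientset is built (kubeconfig vs in-cluster) is elided, and the resync period is an arbitrary choice:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// Sketch: the informer machinery behind the "Caches populated for *v1.Secret"
// lines above.
func watchSecrets(clientset kubernetes.Interface, stop <-chan struct{}) error {
	factory := informers.NewSharedInformerFactoryWithOptions(
		clientset, 10*time.Minute, // resync period: arbitrary choice
		informers.WithNamespace("openshift-monitoring"))
	secrets := factory.Core().V1().Secrets().Informer()
	factory.Start(stop)
	if !cache.WaitForCacheSync(stop, secrets.HasSynced) {
		return fmt.Errorf("secret cache never synced")
	}
	// At this point the kubelet would log: Caches populated for *v1.Secret ...
	return nil
}

func main() {}
```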
\"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.232135 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-root\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.232155 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8xs4\" (UniqueName: \"kubernetes.io/projected/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-api-access-p8xs4\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334158 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/81e4c276-112a-4e41-9e54-28fd4d661b7f-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334212 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334270 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-root\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334305 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8xs4\" (UniqueName: \"kubernetes.io/projected/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-api-access-p8xs4\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334334 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-sys\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334365 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: 
\"kubernetes.io/secret/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334385 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqfdc\" (UniqueName: \"kubernetes.io/projected/81e4c276-112a-4e41-9e54-28fd4d661b7f-kube-api-access-mqfdc\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334413 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-textfile\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334442 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpzwq\" (UniqueName: \"kubernetes.io/projected/454a7eff-fe86-4427-bd18-f1b6becb5fc5-kube-api-access-xpzwq\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334469 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/626a367e-170e-4d9e-ad51-fb6a6f319436-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334494 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334522 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-wtmp\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334558 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/81e4c276-112a-4e41-9e54-28fd4d661b7f-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334589 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/81e4c276-112a-4e41-9e54-28fd4d661b7f-metrics-client-ca\") pod 
\"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334621 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334644 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/454a7eff-fe86-4427-bd18-f1b6becb5fc5-metrics-client-ca\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334664 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-tls\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.334713 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/626a367e-170e-4d9e-ad51-fb6a6f319436-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.335441 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-root\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.335504 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-wtmp\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.335563 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-textfile\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.335586 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/454a7eff-fe86-4427-bd18-f1b6becb5fc5-sys\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.335910 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/626a367e-170e-4d9e-ad51-fb6a6f319436-volume-directive-shadow\") pod 
\"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.335989 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.336074 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/626a367e-170e-4d9e-ad51-fb6a6f319436-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.336255 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/454a7eff-fe86-4427-bd18-f1b6becb5fc5-metrics-client-ca\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.340248 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.340976 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.344148 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.346599 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/454a7eff-fe86-4427-bd18-f1b6becb5fc5-node-exporter-tls\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.352910 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpzwq\" (UniqueName: \"kubernetes.io/projected/454a7eff-fe86-4427-bd18-f1b6becb5fc5-kube-api-access-xpzwq\") pod \"node-exporter-lgjq6\" (UID: \"454a7eff-fe86-4427-bd18-f1b6becb5fc5\") " pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.353412 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8xs4\" (UniqueName: \"kubernetes.io/projected/626a367e-170e-4d9e-ad51-fb6a6f319436-kube-api-access-p8xs4\") pod \"kube-state-metrics-777cb5bd5d-dh2gw\" (UID: \"626a367e-170e-4d9e-ad51-fb6a6f319436\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.435285 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/81e4c276-112a-4e41-9e54-28fd4d661b7f-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.435637 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqfdc\" (UniqueName: \"kubernetes.io/projected/81e4c276-112a-4e41-9e54-28fd4d661b7f-kube-api-access-mqfdc\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.435714 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/81e4c276-112a-4e41-9e54-28fd4d661b7f-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.435739 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/81e4c276-112a-4e41-9e54-28fd4d661b7f-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.436764 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/81e4c276-112a-4e41-9e54-28fd4d661b7f-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.438421 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/81e4c276-112a-4e41-9e54-28fd4d661b7f-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.447281 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/81e4c276-112a-4e41-9e54-28fd4d661b7f-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.450106 
4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-lgjq6" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.459310 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqfdc\" (UniqueName: \"kubernetes.io/projected/81e4c276-112a-4e41-9e54-28fd4d661b7f-kube-api-access-mqfdc\") pod \"openshift-state-metrics-566fddb674-gnd4n\" (UID: \"81e4c276-112a-4e41-9e54-28fd4d661b7f\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: W1125 09:40:50.472001 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod454a7eff_fe86_4427_bd18_f1b6becb5fc5.slice/crio-db82e681c2f879eee9c91d178ffd72230b8f4bd88b8c3c472e9ee48117792e60 WatchSource:0}: Error finding container db82e681c2f879eee9c91d178ffd72230b8f4bd88b8c3c472e9ee48117792e60: Status 404 returned error can't find the container with id db82e681c2f879eee9c91d178ffd72230b8f4bd88b8c3c472e9ee48117792e60 Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.545927 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.560206 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.832197 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n"] Nov 25 09:40:50 crc kubenswrapper[4854]: I1125 09:40:50.970752 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw"] Nov 25 09:40:50 crc kubenswrapper[4854]: W1125 09:40:50.974661 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod626a367e_170e_4d9e_ad51_fb6a6f319436.slice/crio-004b90779e4bdf151ad6d5da0c482ee6c166aae565ddb4b42b98ab138655186a WatchSource:0}: Error finding container 004b90779e4bdf151ad6d5da0c482ee6c166aae565ddb4b42b98ab138655186a: Status 404 returned error can't find the container with id 004b90779e4bdf151ad6d5da0c482ee6c166aae565ddb4b42b98ab138655186a Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.154101 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.156496 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.262253 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.262353 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.262370 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.262446 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.262540 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.262744 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.263572 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-drgjn" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.264267 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.266584 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.270729 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348509 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64d5859f-ec07-4744-9534-bca1c147c0a5-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348565 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/64d5859f-ec07-4744-9534-bca1c147c0a5-config-out\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348592 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/64d5859f-ec07-4744-9534-bca1c147c0a5-tls-assets\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348615 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/64d5859f-ec07-4744-9534-bca1c147c0a5-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc 
kubenswrapper[4854]: I1125 09:40:51.348658 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-config-volume\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348712 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348749 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348786 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348806 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jg9v\" (UniqueName: \"kubernetes.io/projected/64d5859f-ec07-4744-9534-bca1c147c0a5-kube-api-access-5jg9v\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348834 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348855 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/64d5859f-ec07-4744-9534-bca1c147c0a5-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.348879 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-web-config\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.428531 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-lgjq6" 
event={"ID":"454a7eff-fe86-4427-bd18-f1b6becb5fc5","Type":"ContainerStarted","Data":"db82e681c2f879eee9c91d178ffd72230b8f4bd88b8c3c472e9ee48117792e60"} Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.429634 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" event={"ID":"626a367e-170e-4d9e-ad51-fb6a6f319436","Type":"ContainerStarted","Data":"004b90779e4bdf151ad6d5da0c482ee6c166aae565ddb4b42b98ab138655186a"} Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.430664 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" event={"ID":"81e4c276-112a-4e41-9e54-28fd4d661b7f","Type":"ContainerStarted","Data":"3198f3f4ef631304801abb861010d63ecf20eb5b8163994eec31986a6d54e395"} Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.430702 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" event={"ID":"81e4c276-112a-4e41-9e54-28fd4d661b7f","Type":"ContainerStarted","Data":"04b5a3fec283050a826d72b1203c713a6bdd4ddcb86ae236785b45aadd92f2a4"} Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450586 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450726 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450756 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jg9v\" (UniqueName: \"kubernetes.io/projected/64d5859f-ec07-4744-9534-bca1c147c0a5-kube-api-access-5jg9v\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450796 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450821 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/64d5859f-ec07-4744-9534-bca1c147c0a5-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450849 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-web-config\") pod \"alertmanager-main-0\" (UID: 
\"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450880 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64d5859f-ec07-4744-9534-bca1c147c0a5-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450905 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/64d5859f-ec07-4744-9534-bca1c147c0a5-config-out\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450930 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/64d5859f-ec07-4744-9534-bca1c147c0a5-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450952 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/64d5859f-ec07-4744-9534-bca1c147c0a5-tls-assets\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.450997 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-config-volume\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.451028 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: E1125 09:40:51.451179 4854 secret.go:188] Couldn't get secret openshift-monitoring/alertmanager-main-tls: secret "alertmanager-main-tls" not found Nov 25 09:40:51 crc kubenswrapper[4854]: E1125 09:40:51.451244 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-main-tls podName:64d5859f-ec07-4744-9534-bca1c147c0a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:40:51.951223453 +0000 UTC m=+257.804216829 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "secret-alertmanager-main-tls" (UniqueName: "kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-main-tls") pod "alertmanager-main-0" (UID: "64d5859f-ec07-4744-9534-bca1c147c0a5") : secret "alertmanager-main-tls" not found Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.452128 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/64d5859f-ec07-4744-9534-bca1c147c0a5-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.452372 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/64d5859f-ec07-4744-9534-bca1c147c0a5-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.452603 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64d5859f-ec07-4744-9534-bca1c147c0a5-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.457686 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.458462 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/64d5859f-ec07-4744-9534-bca1c147c0a5-config-out\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.458664 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-config-volume\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.459151 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.463129 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-web-config\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.463160 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.475081 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jg9v\" (UniqueName: \"kubernetes.io/projected/64d5859f-ec07-4744-9534-bca1c147c0a5-kube-api-access-5jg9v\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.488300 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/64d5859f-ec07-4744-9534-bca1c147c0a5-tls-assets\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.959025 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:51 crc kubenswrapper[4854]: I1125 09:40:51.964534 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/64d5859f-ec07-4744-9534-bca1c147c0a5-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"64d5859f-ec07-4744-9534-bca1c147c0a5\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.070603 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-b96897c74-47cqm"] Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.075278 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.079285 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.079310 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.079399 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-cl6hc" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.079462 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.082172 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.082583 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-28ohpcjhsv8th" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.083389 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.086070 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-b96897c74-47cqm"] Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.195392 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263640 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-grpc-tls\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263711 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263744 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263786 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/92023885-45a6-401d-b121-c04f13ee6790-metrics-client-ca\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " 
pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263808 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263834 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-tls\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263929 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.263976 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb59h\" (UniqueName: \"kubernetes.io/projected/92023885-45a6-401d-b121-c04f13ee6790-kube-api-access-bb59h\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365298 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-grpc-tls\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365350 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365385 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365424 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/92023885-45a6-401d-b121-c04f13ee6790-metrics-client-ca\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " 
pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365444 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365470 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-tls\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365492 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.365518 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb59h\" (UniqueName: \"kubernetes.io/projected/92023885-45a6-401d-b121-c04f13ee6790-kube-api-access-bb59h\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.367368 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/92023885-45a6-401d-b121-c04f13ee6790-metrics-client-ca\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.369823 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-grpc-tls\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.370965 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.371083 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-tls\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.371168 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.374579 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.376795 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/92023885-45a6-401d-b121-c04f13ee6790-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.385745 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bb59h\" (UniqueName: \"kubernetes.io/projected/92023885-45a6-401d-b121-c04f13ee6790-kube-api-access-bb59h\") pod \"thanos-querier-b96897c74-47cqm\" (UID: \"92023885-45a6-401d-b121-c04f13ee6790\") " pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.396799 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:40:52 crc kubenswrapper[4854]: I1125 09:40:52.437065 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" event={"ID":"81e4c276-112a-4e41-9e54-28fd4d661b7f","Type":"ContainerStarted","Data":"8fd93f01c3cec6a9cd32dc62f2afdcc7de5b603c4c2da14280bc3acedb698dfc"} Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.097100 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.104727 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-b96897c74-47cqm"] Nov 25 09:40:53 crc kubenswrapper[4854]: W1125 09:40:53.116789 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64d5859f_ec07_4744_9534_bca1c147c0a5.slice/crio-cec79a8379dfc7d149849f48ff373e006d44f1753d17048ccd4ed530986becc5 WatchSource:0}: Error finding container cec79a8379dfc7d149849f48ff373e006d44f1753d17048ccd4ed530986becc5: Status 404 returned error can't find the container with id cec79a8379dfc7d149849f48ff373e006d44f1753d17048ccd4ed530986becc5 Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.442357 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" event={"ID":"92023885-45a6-401d-b121-c04f13ee6790","Type":"ContainerStarted","Data":"1033516820cddcfe24a8fe5a4a40d532843988a337091c1887bb64c10ca9e3bb"} Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.443701 4854 generic.go:334] "Generic (PLEG): container finished" podID="454a7eff-fe86-4427-bd18-f1b6becb5fc5" containerID="1c5bae0975c99e554fcfc3b8f127b77d72e7aad9a38f3acbe96b6f9929716b7b" exitCode=0 Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.443817 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-lgjq6" event={"ID":"454a7eff-fe86-4427-bd18-f1b6becb5fc5","Type":"ContainerDied","Data":"1c5bae0975c99e554fcfc3b8f127b77d72e7aad9a38f3acbe96b6f9929716b7b"} Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.446565 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerStarted","Data":"cec79a8379dfc7d149849f48ff373e006d44f1753d17048ccd4ed530986becc5"} Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.448449 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" event={"ID":"626a367e-170e-4d9e-ad51-fb6a6f319436","Type":"ContainerStarted","Data":"3c1d0c89ca8ee4086242dc11af10df722ab241e2f30db70767023e8dbde28710"} Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.448482 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" event={"ID":"626a367e-170e-4d9e-ad51-fb6a6f319436","Type":"ContainerStarted","Data":"22ddea9325454ab3dd1cfc0de0844a00571334dc45ac5e1474391a21319628ab"} Nov 25 09:40:53 crc kubenswrapper[4854]: I1125 09:40:53.448495 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" event={"ID":"626a367e-170e-4d9e-ad51-fb6a6f319436","Type":"ContainerStarted","Data":"5ce0d00c57663e27998d8a8e3fc48f11adc9d9adab138df18aa2c942b6962d76"} Nov 25 09:40:54 crc 
kubenswrapper[4854]: I1125 09:40:54.458475 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" event={"ID":"81e4c276-112a-4e41-9e54-28fd4d661b7f","Type":"ContainerStarted","Data":"bfdfb11ec22cac3463451d5ee5d7bb164bbaf9458b7621bd2da1bd09b9609525"} Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.463084 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-lgjq6" event={"ID":"454a7eff-fe86-4427-bd18-f1b6becb5fc5","Type":"ContainerStarted","Data":"ba12c78aff17a90ebde722f10b48eb6c345ae1172694992e347a4f3ca4d0352a"} Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.463136 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-lgjq6" event={"ID":"454a7eff-fe86-4427-bd18-f1b6becb5fc5","Type":"ContainerStarted","Data":"71a5dec264ae32243138686823505d834e824acc8ee385a439a6812249f1be68"} Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.478759 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-dh2gw" podStartSLOduration=2.7960759619999997 podStartE2EDuration="4.478742955s" podCreationTimestamp="2025-11-25 09:40:50 +0000 UTC" firstStartedPulling="2025-11-25 09:40:50.977191208 +0000 UTC m=+256.830184584" lastFinishedPulling="2025-11-25 09:40:52.659858201 +0000 UTC m=+258.512851577" observedRunningTime="2025-11-25 09:40:53.481205124 +0000 UTC m=+259.334198510" watchObservedRunningTime="2025-11-25 09:40:54.478742955 +0000 UTC m=+260.331736331" Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.498716 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gnd4n" podStartSLOduration=2.540299917 podStartE2EDuration="4.498698007s" podCreationTimestamp="2025-11-25 09:40:50 +0000 UTC" firstStartedPulling="2025-11-25 09:40:51.589911784 +0000 UTC m=+257.442905160" lastFinishedPulling="2025-11-25 09:40:53.548309874 +0000 UTC m=+259.401303250" observedRunningTime="2025-11-25 09:40:54.478920331 +0000 UTC m=+260.331913717" watchObservedRunningTime="2025-11-25 09:40:54.498698007 +0000 UTC m=+260.351691383" Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.499861 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-lgjq6" podStartSLOduration=2.31820502 podStartE2EDuration="4.499855943s" podCreationTimestamp="2025-11-25 09:40:50 +0000 UTC" firstStartedPulling="2025-11-25 09:40:50.474173021 +0000 UTC m=+256.327166397" lastFinishedPulling="2025-11-25 09:40:52.655823934 +0000 UTC m=+258.508817320" observedRunningTime="2025-11-25 09:40:54.498316025 +0000 UTC m=+260.351309411" watchObservedRunningTime="2025-11-25 09:40:54.499855943 +0000 UTC m=+260.352849319" Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.877990 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7796957d69-r8tsd"] Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.878743 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:54 crc kubenswrapper[4854]: I1125 09:40:54.900856 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7796957d69-r8tsd"] Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.011615 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-service-ca\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.011662 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-oauth-config\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.011723 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqdwm\" (UniqueName: \"kubernetes.io/projected/04cd4375-6fa3-45f3-947a-e5ca050d03ab-kube-api-access-pqdwm\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.011751 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-config\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.011777 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-oauth-serving-cert\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.011790 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-serving-cert\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.011808 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-trusted-ca-bundle\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.112486 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-service-ca\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc 
kubenswrapper[4854]: I1125 09:40:55.112529 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-oauth-config\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.112574 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqdwm\" (UniqueName: \"kubernetes.io/projected/04cd4375-6fa3-45f3-947a-e5ca050d03ab-kube-api-access-pqdwm\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.112595 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-config\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.113045 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-serving-cert\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.113555 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-oauth-serving-cert\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.113602 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-trusted-ca-bundle\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.113601 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-config\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.113946 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-service-ca\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.114170 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-oauth-serving-cert\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 
09:40:55.114465 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-trusted-ca-bundle\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.117609 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-oauth-config\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.128332 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqdwm\" (UniqueName: \"kubernetes.io/projected/04cd4375-6fa3-45f3-947a-e5ca050d03ab-kube-api-access-pqdwm\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.137724 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-serving-cert\") pod \"console-7796957d69-r8tsd\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") " pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.198382 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.471045 4854 generic.go:334] "Generic (PLEG): container finished" podID="64d5859f-ec07-4744-9534-bca1c147c0a5" containerID="9a405e1ff5df90b67b5e9fa8683c4680fb6cfdee8f31acf662cf7f241b7e0ca3" exitCode=0 Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.472906 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerDied","Data":"9a405e1ff5df90b67b5e9fa8683c4680fb6cfdee8f31acf662cf7f241b7e0ca3"} Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.699570 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-7d6666bf55-vhxwx"] Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.700332 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.701707 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.701874 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.702350 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.702577 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-asmngk5jr8a3s" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.703327 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.703886 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-fzf6r" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.723663 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-7d6666bf55-vhxwx"] Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.729283 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-client-ca-bundle\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.729423 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4dkz\" (UniqueName: \"kubernetes.io/projected/ec28be0c-f904-4548-aca0-80e0a5d05a23-kube-api-access-f4dkz\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.729472 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec28be0c-f904-4548-aca0-80e0a5d05a23-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.729509 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/ec28be0c-f904-4548-aca0-80e0a5d05a23-audit-log\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.729719 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-secret-metrics-client-certs\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 
25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.729755 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-secret-metrics-server-tls\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.729815 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/ec28be0c-f904-4548-aca0-80e0a5d05a23-metrics-server-audit-profiles\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.830403 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-secret-metrics-client-certs\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.830452 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-secret-metrics-server-tls\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.830488 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/ec28be0c-f904-4548-aca0-80e0a5d05a23-metrics-server-audit-profiles\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.830536 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-client-ca-bundle\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.830573 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4dkz\" (UniqueName: \"kubernetes.io/projected/ec28be0c-f904-4548-aca0-80e0a5d05a23-kube-api-access-f4dkz\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.830600 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec28be0c-f904-4548-aca0-80e0a5d05a23-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.830626 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/ec28be0c-f904-4548-aca0-80e0a5d05a23-audit-log\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.831128 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/ec28be0c-f904-4548-aca0-80e0a5d05a23-audit-log\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.832783 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/ec28be0c-f904-4548-aca0-80e0a5d05a23-metrics-server-audit-profiles\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.833187 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec28be0c-f904-4548-aca0-80e0a5d05a23-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.836665 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-client-ca-bundle\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.836907 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-secret-metrics-server-tls\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.846511 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/ec28be0c-f904-4548-aca0-80e0a5d05a23-secret-metrics-client-certs\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.853914 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4dkz\" (UniqueName: \"kubernetes.io/projected/ec28be0c-f904-4548-aca0-80e0a5d05a23-kube-api-access-f4dkz\") pod \"metrics-server-7d6666bf55-vhxwx\" (UID: \"ec28be0c-f904-4548-aca0-80e0a5d05a23\") " pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.868289 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-58477477d7-6fwdb"] Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.869381 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.871468 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.871725 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.872746 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-58477477d7-6fwdb"] Nov 25 09:40:55 crc kubenswrapper[4854]: I1125 09:40:55.931955 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e-monitoring-plugin-cert\") pod \"monitoring-plugin-58477477d7-6fwdb\" (UID: \"6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e\") " pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.027510 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.033413 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e-monitoring-plugin-cert\") pod \"monitoring-plugin-58477477d7-6fwdb\" (UID: \"6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e\") " pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.054689 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e-monitoring-plugin-cert\") pod \"monitoring-plugin-58477477d7-6fwdb\" (UID: \"6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e\") " pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.198935 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.406974 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-58477477d7-6fwdb"] Nov 25 09:40:56 crc kubenswrapper[4854]: W1125 09:40:56.417119 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6631da7e_9cd7_4bea_8cc5_6f1fbf56be7e.slice/crio-7aaf2e653025a5d3c8378f162e17ff698822940c28ee0a0016496be19398f783 WatchSource:0}: Error finding container 7aaf2e653025a5d3c8378f162e17ff698822940c28ee0a0016496be19398f783: Status 404 returned error can't find the container with id 7aaf2e653025a5d3c8378f162e17ff698822940c28ee0a0016496be19398f783 Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.426135 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.429986 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.433558 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.433857 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.434150 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.434211 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.434222 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-31000036s10th" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.434277 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.434375 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.434504 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-d7gsb" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.434525 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.435969 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.436029 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.440176 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7796957d69-r8tsd"] Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.446662 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.448285 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.449334 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.477068 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7796957d69-r8tsd" event={"ID":"04cd4375-6fa3-45f3-947a-e5ca050d03ab","Type":"ContainerStarted","Data":"ea7e180bd3e5d45b024d2f848a4869c29d39bc0ce98df224747673b6c418d620"} Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.479368 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" event={"ID":"6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e","Type":"ContainerStarted","Data":"7aaf2e653025a5d3c8378f162e17ff698822940c28ee0a0016496be19398f783"} Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.498808 4854 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-monitoring/metrics-server-7d6666bf55-vhxwx"] Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548589 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548647 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548711 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b825aef9-6502-4376-85ce-1fab5a42200a-config-out\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548731 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548749 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-web-config\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548770 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548801 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548822 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548869 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548892 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlh45\" (UniqueName: \"kubernetes.io/projected/b825aef9-6502-4376-85ce-1fab5a42200a-kube-api-access-vlh45\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548937 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548959 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.548986 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.549020 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.549045 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.549077 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-config\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.549101 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b825aef9-6502-4376-85ce-1fab5a42200a-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 
09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.549124 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649791 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649853 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-config\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649874 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b825aef9-6502-4376-85ce-1fab5a42200a-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649898 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649924 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649956 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649979 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.649997 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b825aef9-6502-4376-85ce-1fab5a42200a-config-out\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 
09:40:56.650017 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-web-config\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650042 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650075 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650106 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650149 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650172 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlh45\" (UniqueName: \"kubernetes.io/projected/b825aef9-6502-4376-85ce-1fab5a42200a-kube-api-access-vlh45\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650210 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650230 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650266 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc 
kubenswrapper[4854]: I1125 09:40:56.650297 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.650955 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.651007 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.651261 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.651743 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.652573 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.656321 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.656357 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.656485 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b825aef9-6502-4376-85ce-1fab5a42200a-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc 
kubenswrapper[4854]: I1125 09:40:56.656621 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.657007 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.657765 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.657969 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-web-config\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.658011 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b825aef9-6502-4376-85ce-1fab5a42200a-config-out\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.658469 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.660179 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.667430 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b825aef9-6502-4376-85ce-1fab5a42200a-config\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.673756 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlh45\" (UniqueName: \"kubernetes.io/projected/b825aef9-6502-4376-85ce-1fab5a42200a-kube-api-access-vlh45\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.673997 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b825aef9-6502-4376-85ce-1fab5a42200a-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b825aef9-6502-4376-85ce-1fab5a42200a\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:56 crc kubenswrapper[4854]: I1125 09:40:56.752066 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.011734 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.487145 4854 generic.go:334] "Generic (PLEG): container finished" podID="b825aef9-6502-4376-85ce-1fab5a42200a" containerID="d89deb6c0ccc70b7103cd581a38b1081689e163699c5d09ee14312acd9da900f" exitCode=0 Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.487250 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerDied","Data":"d89deb6c0ccc70b7103cd581a38b1081689e163699c5d09ee14312acd9da900f"} Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.487550 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerStarted","Data":"5ffd509e73b70f51bdf460b4bfb617e1d350663f69c3ba0d526beaf72b4dbe38"} Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.490620 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" event={"ID":"ec28be0c-f904-4548-aca0-80e0a5d05a23","Type":"ContainerStarted","Data":"91bf5314bf905bd80cc41756b822f6470c977c3f6fcef1e56ec81006b88528ef"} Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.496856 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" event={"ID":"92023885-45a6-401d-b121-c04f13ee6790","Type":"ContainerStarted","Data":"61f6f6ef133c55c46da3d53ebeda4fd31dd588e2544fa57b8da90a1d4695c4d0"} Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.496914 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" event={"ID":"92023885-45a6-401d-b121-c04f13ee6790","Type":"ContainerStarted","Data":"f385a04773decf5bddba3e3f25655b6193833abc521a241f39c3cabf7124edb1"} Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.496931 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" event={"ID":"92023885-45a6-401d-b121-c04f13ee6790","Type":"ContainerStarted","Data":"2eb690496b39076683877322b10a77232995c6fd87bec5e978ef6bf8792448de"} Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.499503 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7796957d69-r8tsd" event={"ID":"04cd4375-6fa3-45f3-947a-e5ca050d03ab","Type":"ContainerStarted","Data":"0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc"} Nov 25 09:40:57 crc kubenswrapper[4854]: I1125 09:40:57.538563 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7796957d69-r8tsd" podStartSLOduration=3.538547275 podStartE2EDuration="3.538547275s" podCreationTimestamp="2025-11-25 09:40:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-25 09:40:57.536933588 +0000 UTC m=+263.389926964" watchObservedRunningTime="2025-11-25 09:40:57.538547275 +0000 UTC m=+263.391540641" Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.202754 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-p64dl" Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.257240 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sd29s"] Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.522211 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" event={"ID":"6631da7e-9cd7-4bea-8cc5-6f1fbf56be7e","Type":"ContainerStarted","Data":"1f8a0e3eaa70001bb93b9832cb1b9f258dea3e7e41641a420fd119c9abd1883c"} Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.523454 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.526211 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerStarted","Data":"a3b9a085feed059d78397c9e8e5ba33abc15d709ff8da6e7921e24ea5d48a2ab"} Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.528296 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" event={"ID":"ec28be0c-f904-4548-aca0-80e0a5d05a23","Type":"ContainerStarted","Data":"d1000c3ede96479342763e23c46b0d3b0054785c949b7e74e1465b3250b9f1ef"} Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.528999 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.531017 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" event={"ID":"92023885-45a6-401d-b121-c04f13ee6790","Type":"ContainerStarted","Data":"1a02dda94ad5dc849c5899b0429b9cecce79b176bc64c98402c174b0e5a28df8"} Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.558639 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-58477477d7-6fwdb" podStartSLOduration=2.690610555 podStartE2EDuration="6.558619651s" podCreationTimestamp="2025-11-25 09:40:55 +0000 UTC" firstStartedPulling="2025-11-25 09:40:56.419207062 +0000 UTC m=+262.272200438" lastFinishedPulling="2025-11-25 09:41:00.287216158 +0000 UTC m=+266.140209534" observedRunningTime="2025-11-25 09:41:01.543159682 +0000 UTC m=+267.396153078" watchObservedRunningTime="2025-11-25 09:41:01.558619651 +0000 UTC m=+267.411613027" Nov 25 09:41:01 crc kubenswrapper[4854]: I1125 09:41:01.560790 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" podStartSLOduration=2.787324768 podStartE2EDuration="6.560773835s" podCreationTimestamp="2025-11-25 09:40:55 +0000 UTC" firstStartedPulling="2025-11-25 09:40:56.511804744 +0000 UTC m=+262.364798120" lastFinishedPulling="2025-11-25 09:41:00.285253811 +0000 UTC m=+266.138247187" observedRunningTime="2025-11-25 09:41:01.557299912 +0000 UTC m=+267.410293288" watchObservedRunningTime="2025-11-25 09:41:01.560773835 +0000 UTC m=+267.413767211" Nov 25 09:41:02 crc 
kubenswrapper[4854]: I1125 09:41:02.541214 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerStarted","Data":"a76aefc7257722720ab364a364cad56b2fb6a8fcb010cd01fe6eb1a462b4a520"} Nov 25 09:41:02 crc kubenswrapper[4854]: I1125 09:41:02.544738 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" event={"ID":"92023885-45a6-401d-b121-c04f13ee6790","Type":"ContainerStarted","Data":"4769901c879c9ecf930c3171efec9a7faecc4feac22f546dac249e6ccd7eaea7"} Nov 25 09:41:03 crc kubenswrapper[4854]: I1125 09:41:03.556906 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerStarted","Data":"aaaee29bb9d389b7f9320b53b1354ebfa4b533a2af95aebe25dbf4b0e371222b"} Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.601823 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerStarted","Data":"52a5fd31d81fd0170eb156ea646198b5316e289139592e49df0d342be6b2b649"} Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.602206 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerStarted","Data":"2472afb2003e7fba1ba784e62f71f08c0b0ea308236a8bcb09e0d789590ca01c"} Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.610885 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerStarted","Data":"c2d90b0bc067c5483113b44c3aa017cb7d83e8a7c2135e88d1286491e795ec11"} Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.610932 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerStarted","Data":"5e4f02d9151fd9cf01e84ad8bbb6270c8f6ae1feb9bf1fee699a1689fd0ad767"} Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.619390 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" event={"ID":"92023885-45a6-401d-b121-c04f13ee6790","Type":"ContainerStarted","Data":"56e2e24edc451b55273991452db36bc0bb4a8a5fce67e4439266795de9b147ea"} Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.620130 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.646377 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" podStartSLOduration=5.358804984 podStartE2EDuration="12.646358343s" podCreationTimestamp="2025-11-25 09:40:52 +0000 UTC" firstStartedPulling="2025-11-25 09:40:53.107624918 +0000 UTC m=+258.960618294" lastFinishedPulling="2025-11-25 09:41:00.395178277 +0000 UTC m=+266.248171653" observedRunningTime="2025-11-25 09:41:04.644123536 +0000 UTC m=+270.497116922" watchObservedRunningTime="2025-11-25 09:41:04.646358343 +0000 UTC m=+270.499351719" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.666495 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 09:41:04 crc 
kubenswrapper[4854]: I1125 09:41:04.667320 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.670916 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.671128 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.682183 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.691979 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-b96897c74-47cqm" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.769938 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e7ab818b-30bd-498f-8299-fad888227ca3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.770107 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7ab818b-30bd-498f-8299-fad888227ca3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.871545 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e7ab818b-30bd-498f-8299-fad888227ca3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.871652 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7ab818b-30bd-498f-8299-fad888227ca3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.871823 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e7ab818b-30bd-498f-8299-fad888227ca3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:04 crc kubenswrapper[4854]: I1125 09:41:04.890142 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7ab818b-30bd-498f-8299-fad888227ca3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.065486 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.199575 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.202585 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.207954 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.305224 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 09:41:05 crc kubenswrapper[4854]: W1125 09:41:05.312716 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pode7ab818b_30bd_498f_8299_fad888227ca3.slice/crio-73b755691a3e4ac5b5cd420ede14a02d25d2ecd6da2fa2964cf41d11227e23a5 WatchSource:0}: Error finding container 73b755691a3e4ac5b5cd420ede14a02d25d2ecd6da2fa2964cf41d11227e23a5: Status 404 returned error can't find the container with id 73b755691a3e4ac5b5cd420ede14a02d25d2ecd6da2fa2964cf41d11227e23a5 Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.628451 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerStarted","Data":"e9ec87690aa940e9d7c27147e207dd4cf9028cac34334220c40e0d966d3703d5"} Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.628501 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerStarted","Data":"708ec39ef13a6e2c9941827f65163faf109bfe83adf9a2a98067cedb7392b9d0"} Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.632038 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"64d5859f-ec07-4744-9534-bca1c147c0a5","Type":"ContainerStarted","Data":"0dada052bbc1fcf806030405d2b38e230feb017b318990df6f7af99965e3925c"} Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.634818 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e7ab818b-30bd-498f-8299-fad888227ca3","Type":"ContainerStarted","Data":"73b755691a3e4ac5b5cd420ede14a02d25d2ecd6da2fa2964cf41d11227e23a5"} Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.637423 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.664398 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=7.49857816 podStartE2EDuration="14.664379845s" podCreationTimestamp="2025-11-25 09:40:51 +0000 UTC" firstStartedPulling="2025-11-25 09:40:53.119447216 +0000 UTC m=+258.972440592" lastFinishedPulling="2025-11-25 09:41:00.285248911 +0000 UTC m=+266.138242277" observedRunningTime="2025-11-25 09:41:05.661729887 +0000 UTC m=+271.514723263" watchObservedRunningTime="2025-11-25 09:41:05.664379845 +0000 UTC m=+271.517373221" Nov 25 09:41:05 crc kubenswrapper[4854]: I1125 09:41:05.710269 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m5689"] 
Nov 25 09:41:06 crc kubenswrapper[4854]: I1125 09:41:06.642858 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerStarted","Data":"ab4ffdee5dec02462ab9b04e643c85aaa9275475126816acc324d1901517cbb9"} Nov 25 09:41:06 crc kubenswrapper[4854]: I1125 09:41:06.643184 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b825aef9-6502-4376-85ce-1fab5a42200a","Type":"ContainerStarted","Data":"50147a4749b5f2eab3b58a2222475da8d658ab3f16bb34d14db7eec29bdd4bb7"} Nov 25 09:41:06 crc kubenswrapper[4854]: I1125 09:41:06.645157 4854 generic.go:334] "Generic (PLEG): container finished" podID="e7ab818b-30bd-498f-8299-fad888227ca3" containerID="2a2851647c651b0aa68e65ba2538ce3b78af307eddabed27bb9a7755087984db" exitCode=0 Nov 25 09:41:06 crc kubenswrapper[4854]: I1125 09:41:06.645259 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e7ab818b-30bd-498f-8299-fad888227ca3","Type":"ContainerDied","Data":"2a2851647c651b0aa68e65ba2538ce3b78af307eddabed27bb9a7755087984db"} Nov 25 09:41:06 crc kubenswrapper[4854]: I1125 09:41:06.686023 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=4.59077041 podStartE2EDuration="10.686007236s" podCreationTimestamp="2025-11-25 09:40:56 +0000 UTC" firstStartedPulling="2025-11-25 09:40:57.490157557 +0000 UTC m=+263.343150933" lastFinishedPulling="2025-11-25 09:41:03.585394383 +0000 UTC m=+269.438387759" observedRunningTime="2025-11-25 09:41:06.684367957 +0000 UTC m=+272.537361353" watchObservedRunningTime="2025-11-25 09:41:06.686007236 +0000 UTC m=+272.539000612" Nov 25 09:41:06 crc kubenswrapper[4854]: I1125 09:41:06.752238 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:41:07 crc kubenswrapper[4854]: I1125 09:41:07.913238 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.019462 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e7ab818b-30bd-498f-8299-fad888227ca3-kubelet-dir\") pod \"e7ab818b-30bd-498f-8299-fad888227ca3\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.019509 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7ab818b-30bd-498f-8299-fad888227ca3-kube-api-access\") pod \"e7ab818b-30bd-498f-8299-fad888227ca3\" (UID: \"e7ab818b-30bd-498f-8299-fad888227ca3\") " Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.019569 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e7ab818b-30bd-498f-8299-fad888227ca3-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e7ab818b-30bd-498f-8299-fad888227ca3" (UID: "e7ab818b-30bd-498f-8299-fad888227ca3"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.019910 4854 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e7ab818b-30bd-498f-8299-fad888227ca3-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.026968 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7ab818b-30bd-498f-8299-fad888227ca3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7ab818b-30bd-498f-8299-fad888227ca3" (UID: "e7ab818b-30bd-498f-8299-fad888227ca3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.120857 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7ab818b-30bd-498f-8299-fad888227ca3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.658070 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.658056 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e7ab818b-30bd-498f-8299-fad888227ca3","Type":"ContainerDied","Data":"73b755691a3e4ac5b5cd420ede14a02d25d2ecd6da2fa2964cf41d11227e23a5"} Nov 25 09:41:08 crc kubenswrapper[4854]: I1125 09:41:08.658157 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73b755691a3e4ac5b5cd420ede14a02d25d2ecd6da2fa2964cf41d11227e23a5" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.652644 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 09:41:09 crc kubenswrapper[4854]: E1125 09:41:09.652943 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7ab818b-30bd-498f-8299-fad888227ca3" containerName="pruner" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.652960 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7ab818b-30bd-498f-8299-fad888227ca3" containerName="pruner" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.653088 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7ab818b-30bd-498f-8299-fad888227ca3" containerName="pruner" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.653617 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.657030 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.657065 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.672729 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.745302 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-var-lock\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.745549 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0b5231b-f1be-4866-9f88-5b54b42a877a-kube-api-access\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.745603 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.847308 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-var-lock\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.847469 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0b5231b-f1be-4866-9f88-5b54b42a877a-kube-api-access\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.847511 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.847471 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-var-lock\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.847651 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.873693 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0b5231b-f1be-4866-9f88-5b54b42a877a-kube-api-access\") pod \"installer-9-crc\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:09 crc kubenswrapper[4854]: I1125 09:41:09.977619 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:10 crc kubenswrapper[4854]: I1125 09:41:10.181693 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 09:41:10 crc kubenswrapper[4854]: I1125 09:41:10.675661 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d0b5231b-f1be-4866-9f88-5b54b42a877a","Type":"ContainerStarted","Data":"f5b88e396f6f8fddcab39dc2c6948e35f2224ef64f651fedda4f70260d27da9a"} Nov 25 09:41:10 crc kubenswrapper[4854]: I1125 09:41:10.676054 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d0b5231b-f1be-4866-9f88-5b54b42a877a","Type":"ContainerStarted","Data":"c5c1590c485b2dcb8c0eee2f0250c74e2b757bd21d2b335ff325929bac125509"} Nov 25 09:41:16 crc kubenswrapper[4854]: I1125 09:41:16.028948 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:41:16 crc kubenswrapper[4854]: I1125 09:41:16.029418 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.327893 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" podUID="974e5caf-5513-4e3e-b8f6-cf67c37b12bb" containerName="registry" containerID="cri-o://8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e" gracePeriod=30 Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.722352 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.748956 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=17.748940127 podStartE2EDuration="17.748940127s" podCreationTimestamp="2025-11-25 09:41:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:41:10.701858567 +0000 UTC m=+276.554851973" watchObservedRunningTime="2025-11-25 09:41:26.748940127 +0000 UTC m=+292.601933503" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.797502 4854 generic.go:334] "Generic (PLEG): container finished" podID="974e5caf-5513-4e3e-b8f6-cf67c37b12bb" containerID="8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e" exitCode=0 Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.797549 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" event={"ID":"974e5caf-5513-4e3e-b8f6-cf67c37b12bb","Type":"ContainerDied","Data":"8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e"} Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.797561 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.797585 4854 scope.go:117] "RemoveContainer" containerID="8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.797574 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sd29s" event={"ID":"974e5caf-5513-4e3e-b8f6-cf67c37b12bb","Type":"ContainerDied","Data":"0a00556ecfc0ffa73665188d46ce8b7323ff0249feafef3477d548c58835c3f0"} Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.813898 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-bound-sa-token\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.813989 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-trusted-ca\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.814030 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqknn\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-kube-api-access-zqknn\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.814087 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-ca-trust-extracted\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.814129 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-installation-pull-secrets\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.814361 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.814391 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-certificates\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.814442 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-tls\") pod \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\" (UID: \"974e5caf-5513-4e3e-b8f6-cf67c37b12bb\") " Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.815939 4854 scope.go:117] "RemoveContainer" containerID="8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e" Nov 25 09:41:26 crc kubenswrapper[4854]: E1125 09:41:26.817770 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e\": container with ID starting with 8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e not found: ID does not exist" containerID="8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.817812 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e"} err="failed to get container status \"8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e\": rpc error: code = NotFound desc = could not find container \"8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e\": container with ID starting with 8596854e3466b6bff6a11eac4b4063a76d9d227ab6d5c3b6de1c69a4ce7e713e not found: ID does not exist" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.818270 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.820446 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.822163 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-kube-api-access-zqknn" (OuterVolumeSpecName: "kube-api-access-zqknn") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "kube-api-access-zqknn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.822409 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.823872 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.824044 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.831083 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.833477 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "974e5caf-5513-4e3e-b8f6-cf67c37b12bb" (UID: "974e5caf-5513-4e3e-b8f6-cf67c37b12bb"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.915528 4854 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.915562 4854 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.915574 4854 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.915583 4854 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.915591 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.915600 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqknn\" (UniqueName: \"kubernetes.io/projected/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-kube-api-access-zqknn\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:26 crc kubenswrapper[4854]: I1125 09:41:26.915609 4854 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/974e5caf-5513-4e3e-b8f6-cf67c37b12bb-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:27 crc kubenswrapper[4854]: I1125 09:41:27.131433 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sd29s"] Nov 25 09:41:27 crc kubenswrapper[4854]: I1125 09:41:27.139370 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sd29s"] Nov 25 09:41:29 crc kubenswrapper[4854]: I1125 09:41:29.023330 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="974e5caf-5513-4e3e-b8f6-cf67c37b12bb" path="/var/lib/kubelet/pods/974e5caf-5513-4e3e-b8f6-cf67c37b12bb/volumes" Nov 25 09:41:30 crc kubenswrapper[4854]: I1125 09:41:30.763761 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-m5689" podUID="97e545f8-81c2-400b-a339-b2b3a1958492" containerName="console" containerID="cri-o://69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6" gracePeriod=15 Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.097230 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m5689_97e545f8-81c2-400b-a339-b2b3a1958492/console/0.log" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.097532 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.181577 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzngf\" (UniqueName: \"kubernetes.io/projected/97e545f8-81c2-400b-a339-b2b3a1958492-kube-api-access-wzngf\") pod \"97e545f8-81c2-400b-a339-b2b3a1958492\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.181766 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-service-ca\") pod \"97e545f8-81c2-400b-a339-b2b3a1958492\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.181814 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-trusted-ca-bundle\") pod \"97e545f8-81c2-400b-a339-b2b3a1958492\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.181938 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-oauth-serving-cert\") pod \"97e545f8-81c2-400b-a339-b2b3a1958492\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.182697 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "97e545f8-81c2-400b-a339-b2b3a1958492" (UID: "97e545f8-81c2-400b-a339-b2b3a1958492"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.182721 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "97e545f8-81c2-400b-a339-b2b3a1958492" (UID: "97e545f8-81c2-400b-a339-b2b3a1958492"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.182748 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-service-ca" (OuterVolumeSpecName: "service-ca") pod "97e545f8-81c2-400b-a339-b2b3a1958492" (UID: "97e545f8-81c2-400b-a339-b2b3a1958492"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.182876 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-console-config\") pod \"97e545f8-81c2-400b-a339-b2b3a1958492\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.182963 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-oauth-config\") pod \"97e545f8-81c2-400b-a339-b2b3a1958492\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.183019 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-serving-cert\") pod \"97e545f8-81c2-400b-a339-b2b3a1958492\" (UID: \"97e545f8-81c2-400b-a339-b2b3a1958492\") " Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.183583 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-console-config" (OuterVolumeSpecName: "console-config") pod "97e545f8-81c2-400b-a339-b2b3a1958492" (UID: "97e545f8-81c2-400b-a339-b2b3a1958492"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.184194 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.184220 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.184236 4854 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.184249 4854 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/97e545f8-81c2-400b-a339-b2b3a1958492-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.187473 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "97e545f8-81c2-400b-a339-b2b3a1958492" (UID: "97e545f8-81c2-400b-a339-b2b3a1958492"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.188096 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "97e545f8-81c2-400b-a339-b2b3a1958492" (UID: "97e545f8-81c2-400b-a339-b2b3a1958492"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.189049 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97e545f8-81c2-400b-a339-b2b3a1958492-kube-api-access-wzngf" (OuterVolumeSpecName: "kube-api-access-wzngf") pod "97e545f8-81c2-400b-a339-b2b3a1958492" (UID: "97e545f8-81c2-400b-a339-b2b3a1958492"). InnerVolumeSpecName "kube-api-access-wzngf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.285281 4854 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.285321 4854 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/97e545f8-81c2-400b-a339-b2b3a1958492-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.285332 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzngf\" (UniqueName: \"kubernetes.io/projected/97e545f8-81c2-400b-a339-b2b3a1958492-kube-api-access-wzngf\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.832187 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m5689_97e545f8-81c2-400b-a339-b2b3a1958492/console/0.log" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.832558 4854 generic.go:334] "Generic (PLEG): container finished" podID="97e545f8-81c2-400b-a339-b2b3a1958492" containerID="69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6" exitCode=2 Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.832595 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5689" event={"ID":"97e545f8-81c2-400b-a339-b2b3a1958492","Type":"ContainerDied","Data":"69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6"} Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.832623 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5689" event={"ID":"97e545f8-81c2-400b-a339-b2b3a1958492","Type":"ContainerDied","Data":"4194399735ad3d15c6809261046f71ab943d9557f41bc896de4c2e1fb13ab91e"} Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.832641 4854 scope.go:117] "RemoveContainer" containerID="69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.832663 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-m5689" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.855828 4854 scope.go:117] "RemoveContainer" containerID="69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6" Nov 25 09:41:31 crc kubenswrapper[4854]: E1125 09:41:31.856719 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6\": container with ID starting with 69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6 not found: ID does not exist" containerID="69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.856765 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6"} err="failed to get container status \"69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6\": rpc error: code = NotFound desc = could not find container \"69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6\": container with ID starting with 69c8799d3c676a5d8fbe8cb73a69682373e72a3b4e81a047c0bf95157e1ce0e6 not found: ID does not exist" Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.861349 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m5689"] Nov 25 09:41:31 crc kubenswrapper[4854]: I1125 09:41:31.865022 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-m5689"] Nov 25 09:41:33 crc kubenswrapper[4854]: I1125 09:41:33.019531 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97e545f8-81c2-400b-a339-b2b3a1958492" path="/var/lib/kubelet/pods/97e545f8-81c2-400b-a339-b2b3a1958492/volumes" Nov 25 09:41:34 crc kubenswrapper[4854]: I1125 09:41:34.799080 4854 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 25 09:41:36 crc kubenswrapper[4854]: I1125 09:41:36.033699 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:41:36 crc kubenswrapper[4854]: I1125 09:41:36.038617 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-7d6666bf55-vhxwx" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.359379 4854 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.361200 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="974e5caf-5513-4e3e-b8f6-cf67c37b12bb" containerName="registry" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.361308 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="974e5caf-5513-4e3e-b8f6-cf67c37b12bb" containerName="registry" Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.361446 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97e545f8-81c2-400b-a339-b2b3a1958492" containerName="console" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.361536 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="97e545f8-81c2-400b-a339-b2b3a1958492" containerName="console" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.361753 4854 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="974e5caf-5513-4e3e-b8f6-cf67c37b12bb" containerName="registry" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.361868 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="97e545f8-81c2-400b-a339-b2b3a1958492" containerName="console" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362323 4854 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362438 4854 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362514 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.362789 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362883 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362959 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831" gracePeriod=15 Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362996 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e" gracePeriod=15 Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362938 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89" gracePeriod=15 Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.362979 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363207 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.363224 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363233 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362878 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898" gracePeriod=15 Nov 
25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.363243 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363376 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.363389 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363397 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.363413 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363421 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 09:41:48 crc kubenswrapper[4854]: E1125 09:41:48.363432 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363440 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363579 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363596 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363603 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363613 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363662 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.363942 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.362935 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8" gracePeriod=15 Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.366884 4854 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" 
podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.425450 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458560 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458626 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458656 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458747 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458782 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458815 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458846 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.458875 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.560811 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561212 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561272 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561322 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561361 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561387 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561412 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561457 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561547 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.560996 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561604 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561631 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561658 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561709 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561737 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.561763 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:48 crc kubenswrapper[4854]: I1125 09:41:48.708827 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:41:49 crc kubenswrapper[4854]: E1125 09:41:48.756881 4854 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.184:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b3698c188a014 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:41:48.755566612 +0000 UTC m=+314.608559988,LastTimestamp:2025-11-25 09:41:48.755566612 +0000 UTC m=+314.608559988,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.933061 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.934794 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.935565 4854 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8" exitCode=0 Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.935593 4854 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89" exitCode=0 Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.935602 4854 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831" exitCode=0 Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.935612 4854 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e" exitCode=2 Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.935688 4854 scope.go:117] "RemoveContainer" containerID="0acb99924688ba78a1ab512edf47526e40ab721e59dd78e592d3e2972a32c5e1" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.937323 4854 generic.go:334] "Generic (PLEG): container finished" podID="d0b5231b-f1be-4866-9f88-5b54b42a877a" containerID="f5b88e396f6f8fddcab39dc2c6948e35f2224ef64f651fedda4f70260d27da9a" exitCode=0 Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.937437 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d0b5231b-f1be-4866-9f88-5b54b42a877a","Type":"ContainerDied","Data":"f5b88e396f6f8fddcab39dc2c6948e35f2224ef64f651fedda4f70260d27da9a"} Nov 25 09:41:49 
crc kubenswrapper[4854]: I1125 09:41:48.938129 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.938357 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:48.938695 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"d0a0b7383f3374004c2161bc14fe5cf30823496a709153f9de69397915dd57f1"} Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:49.946509 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07"} Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:49.948145 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:49.948481 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:49 crc kubenswrapper[4854]: I1125 09:41:49.949606 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.163015 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.164014 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.164346 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.287896 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-kubelet-dir\") pod \"d0b5231b-f1be-4866-9f88-5b54b42a877a\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.287985 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-var-lock\") pod \"d0b5231b-f1be-4866-9f88-5b54b42a877a\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.288036 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0b5231b-f1be-4866-9f88-5b54b42a877a-kube-api-access\") pod \"d0b5231b-f1be-4866-9f88-5b54b42a877a\" (UID: \"d0b5231b-f1be-4866-9f88-5b54b42a877a\") " Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.288016 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d0b5231b-f1be-4866-9f88-5b54b42a877a" (UID: "d0b5231b-f1be-4866-9f88-5b54b42a877a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.288056 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-var-lock" (OuterVolumeSpecName: "var-lock") pod "d0b5231b-f1be-4866-9f88-5b54b42a877a" (UID: "d0b5231b-f1be-4866-9f88-5b54b42a877a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.288522 4854 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.288543 4854 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d0b5231b-f1be-4866-9f88-5b54b42a877a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.296036 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0b5231b-f1be-4866-9f88-5b54b42a877a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d0b5231b-f1be-4866-9f88-5b54b42a877a" (UID: "d0b5231b-f1be-4866-9f88-5b54b42a877a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.390200 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0b5231b-f1be-4866-9f88-5b54b42a877a-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.960571 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.961884 4854 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898" exitCode=0 Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.963016 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d0b5231b-f1be-4866-9f88-5b54b42a877a","Type":"ContainerDied","Data":"c5c1590c485b2dcb8c0eee2f0250c74e2b757bd21d2b335ff325929bac125509"} Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.963057 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5c1590c485b2dcb8c0eee2f0250c74e2b757bd21d2b335ff325929bac125509" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.963058 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.986464 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:50 crc kubenswrapper[4854]: I1125 09:41:50.986993 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.253965 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.254914 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.255519 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.255958 4854 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.256276 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405141 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405256 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405334 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 
09:41:51.405444 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405455 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405481 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405814 4854 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405895 4854 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.405960 4854 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.972315 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.974835 4854 scope.go:117] "RemoveContainer" containerID="5cf60028a28d9112b7b371a0bbd604cdd7b6fcad5d126c5ba31c910f9bc603a8" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.974926 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.999080 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:51 crc kubenswrapper[4854]: I1125 09:41:51.999373 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:52 crc kubenswrapper[4854]: I1125 09:41:51.999836 4854 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:52 crc kubenswrapper[4854]: I1125 09:41:52.044504 4854 scope.go:117] "RemoveContainer" containerID="5f6983920beee2065127c592be1d3df413eaccf6b23fd7bc6b97e1e2e6442c89" Nov 25 09:41:52 crc kubenswrapper[4854]: I1125 09:41:52.456661 4854 scope.go:117] "RemoveContainer" containerID="4946d478b19a353aa6e9d3268397cd7cd22bdccb0e5db91ed2ed9288e5cc8831" Nov 25 09:41:52 crc kubenswrapper[4854]: I1125 09:41:52.471818 4854 scope.go:117] "RemoveContainer" containerID="359a00e28146bf169b60245f5fb1b5030e495b2991d9e477da494a0d5b413c3e" Nov 25 09:41:52 crc kubenswrapper[4854]: I1125 09:41:52.486700 4854 scope.go:117] "RemoveContainer" containerID="dc2c950cf37abab28877958f1602d57ddb14213ddef67fcb6f23c0f9019c5898" Nov 25 09:41:52 crc kubenswrapper[4854]: I1125 09:41:52.504316 4854 scope.go:117] "RemoveContainer" containerID="8e6b18950b74c813223773942da5b256fffa45ea0f3f5cda12262fc030cc164c" Nov 25 09:41:53 crc kubenswrapper[4854]: I1125 09:41:53.020964 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 09:41:53 crc kubenswrapper[4854]: E1125 09:41:53.339444 4854 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.184:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b3698c188a014 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:41:48.755566612 +0000 UTC m=+314.608559988,LastTimestamp:2025-11-25 09:41:48.755566612 +0000 UTC m=+314.608559988,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 09:41:55 crc kubenswrapper[4854]: I1125 09:41:55.020312 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:55 crc kubenswrapper[4854]: I1125 09:41:55.021015 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:56 crc kubenswrapper[4854]: I1125 09:41:56.753027 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:41:56 crc kubenswrapper[4854]: I1125 09:41:56.789192 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:41:56 crc kubenswrapper[4854]: I1125 09:41:56.790121 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:56 crc kubenswrapper[4854]: I1125 09:41:56.790455 4854 status_manager.go:851] "Failed to get status for pod" podUID="b825aef9-6502-4376-85ce-1fab5a42200a" pod="openshift-monitoring/prometheus-k8s-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-monitoring/pods/prometheus-k8s-0\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:56 crc kubenswrapper[4854]: I1125 09:41:56.790794 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: I1125 09:41:57.032601 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:41:57 crc kubenswrapper[4854]: I1125 09:41:57.033185 4854 status_manager.go:851] "Failed to get status for pod" podUID="b825aef9-6502-4376-85ce-1fab5a42200a" pod="openshift-monitoring/prometheus-k8s-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-monitoring/pods/prometheus-k8s-0\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: I1125 09:41:57.033685 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: I1125 09:41:57.034107 4854 status_manager.go:851] "Failed to get status for pod" 
podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: E1125 09:41:57.760573 4854 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: E1125 09:41:57.761111 4854 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: E1125 09:41:57.761286 4854 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: E1125 09:41:57.761430 4854 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: E1125 09:41:57.761591 4854 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:41:57 crc kubenswrapper[4854]: I1125 09:41:57.761617 4854 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 09:41:57 crc kubenswrapper[4854]: E1125 09:41:57.761907 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="200ms" Nov 25 09:41:57 crc kubenswrapper[4854]: E1125 09:41:57.963202 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="400ms" Nov 25 09:41:58 crc kubenswrapper[4854]: E1125 09:41:58.364701 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="800ms" Nov 25 09:41:59 crc kubenswrapper[4854]: E1125 09:41:59.166260 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="1.6s" Nov 25 09:42:00 crc kubenswrapper[4854]: I1125 09:42:00.012547 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:00 crc kubenswrapper[4854]: I1125 09:42:00.013366 4854 status_manager.go:851] "Failed to get status for pod" podUID="b825aef9-6502-4376-85ce-1fab5a42200a" pod="openshift-monitoring/prometheus-k8s-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-monitoring/pods/prometheus-k8s-0\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:42:00 crc kubenswrapper[4854]: I1125 09:42:00.013610 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:42:00 crc kubenswrapper[4854]: I1125 09:42:00.013868 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:42:00 crc kubenswrapper[4854]: I1125 09:42:00.040449 4854 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:00 crc kubenswrapper[4854]: I1125 09:42:00.040488 4854 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:00 crc kubenswrapper[4854]: E1125 09:42:00.040999 4854 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:00 crc kubenswrapper[4854]: I1125 09:42:00.041698 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:00 crc kubenswrapper[4854]: E1125 09:42:00.767486 4854 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.184:6443: connect: connection refused" interval="3.2s" Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.029275 4854 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="5a4f4974812cb00dcf773bff390ac5009efa8801fd159e3820642ffc04e79581" exitCode=0 Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.029345 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"5a4f4974812cb00dcf773bff390ac5009efa8801fd159e3820642ffc04e79581"} Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.029381 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e7bfbbebdcee35537cb4f17a98656258d2b006994a0c0b15f5952eeccba0248b"} Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.030091 4854 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.030195 4854 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.030359 4854 status_manager.go:851] "Failed to get status for pod" podUID="b825aef9-6502-4376-85ce-1fab5a42200a" pod="openshift-monitoring/prometheus-k8s-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-monitoring/pods/prometheus-k8s-0\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:42:01 crc kubenswrapper[4854]: E1125 09:42:01.030930 4854 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.031134 4854 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:42:01 crc kubenswrapper[4854]: I1125 09:42:01.031711 4854 status_manager.go:851] "Failed to get status for pod" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.184:6443: connect: connection refused" Nov 25 09:42:02 crc kubenswrapper[4854]: I1125 09:42:02.050567 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1bdfedf9b9845820879f0eebd3a04ef902cde5c9200162a4a6a03f51da08c1a1"} Nov 25 
09:42:02 crc kubenswrapper[4854]: I1125 09:42:02.051140 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"820d78a61019c473a3566d21934c60670cf53561c89ca1e490722cf641066f6b"} Nov 25 09:42:02 crc kubenswrapper[4854]: I1125 09:42:02.051160 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0a8a7d2a132a2c8b249951668b72b605b2d8e8e77f9f526663dad8d9d011f121"} Nov 25 09:42:03 crc kubenswrapper[4854]: I1125 09:42:03.067515 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5fb6e65f99294b2df64e46ccf23fe2e4c7038aad863aec58ec952ade654a75f3"} Nov 25 09:42:03 crc kubenswrapper[4854]: I1125 09:42:03.067567 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"dca071489740cdb0f7d8f7d06a1c4bf5ed324768fd33cba8fa33e46e53e5c6e3"} Nov 25 09:42:03 crc kubenswrapper[4854]: I1125 09:42:03.067781 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:03 crc kubenswrapper[4854]: I1125 09:42:03.067855 4854 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:03 crc kubenswrapper[4854]: I1125 09:42:03.067876 4854 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:04 crc kubenswrapper[4854]: I1125 09:42:04.075329 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 09:42:04 crc kubenswrapper[4854]: I1125 09:42:04.075635 4854 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116" exitCode=1 Nov 25 09:42:04 crc kubenswrapper[4854]: I1125 09:42:04.075685 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116"} Nov 25 09:42:04 crc kubenswrapper[4854]: I1125 09:42:04.076251 4854 scope.go:117] "RemoveContainer" containerID="d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116" Nov 25 09:42:05 crc kubenswrapper[4854]: I1125 09:42:05.041958 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:05 crc kubenswrapper[4854]: I1125 09:42:05.042017 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:05 crc kubenswrapper[4854]: I1125 09:42:05.047928 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:05 crc kubenswrapper[4854]: I1125 09:42:05.088085 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 09:42:05 crc kubenswrapper[4854]: I1125 09:42:05.088791 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"de27b1ae4d5eadf1bb59fa7e6ffaa41880feb6f0bb2e835e7e8754b82966f217"} Nov 25 09:42:06 crc kubenswrapper[4854]: I1125 09:42:06.951054 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:42:08 crc kubenswrapper[4854]: I1125 09:42:08.077584 4854 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:08 crc kubenswrapper[4854]: I1125 09:42:08.116511 4854 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:08 crc kubenswrapper[4854]: I1125 09:42:08.116560 4854 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:08 crc kubenswrapper[4854]: I1125 09:42:08.119956 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:08 crc kubenswrapper[4854]: I1125 09:42:08.162012 4854 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b48a9d73-ed9d-4ce4-b404-549b18e25a15" Nov 25 09:42:09 crc kubenswrapper[4854]: I1125 09:42:09.121943 4854 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:09 crc kubenswrapper[4854]: I1125 09:42:09.121984 4854 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="36afbdef-e971-4c88-b8fd-0f289b9dd07c" Nov 25 09:42:09 crc kubenswrapper[4854]: I1125 09:42:09.124888 4854 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b48a9d73-ed9d-4ce4-b404-549b18e25a15" Nov 25 09:42:09 crc kubenswrapper[4854]: I1125 09:42:09.761802 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:42:09 crc kubenswrapper[4854]: I1125 09:42:09.762025 4854 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 09:42:09 crc kubenswrapper[4854]: I1125 09:42:09.762109 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 09:42:17 crc kubenswrapper[4854]: I1125 09:42:17.256655 4854 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:42:18 crc kubenswrapper[4854]: I1125 09:42:18.178554 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 09:42:18 crc kubenswrapper[4854]: I1125 09:42:18.416165 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Nov 25 09:42:18 crc kubenswrapper[4854]: I1125 09:42:18.884422 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:42:18 crc kubenswrapper[4854]: I1125 09:42:18.936148 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 09:42:19 crc kubenswrapper[4854]: I1125 09:42:19.418614 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 09:42:19 crc kubenswrapper[4854]: I1125 09:42:19.561476 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 09:42:19 crc kubenswrapper[4854]: I1125 09:42:19.762195 4854 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 09:42:19 crc kubenswrapper[4854]: I1125 09:42:19.762264 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 09:42:19 crc kubenswrapper[4854]: I1125 09:42:19.787407 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 09:42:19 crc kubenswrapper[4854]: I1125 09:42:19.959966 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.255651 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.274620 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.280815 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.328400 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.396442 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.443207 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-xw52g" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.443354 4854 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.449038 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.571518 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.736825 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.777484 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.800151 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.825922 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 09:42:20 crc kubenswrapper[4854]: I1125 09:42:20.971884 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.023532 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.192436 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.344528 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.425050 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.426658 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.431773 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.447074 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.473638 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-fzf6r" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.550714 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-asmngk5jr8a3s" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.604133 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.610999 4854 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.611878 
4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=33.611863656 podStartE2EDuration="33.611863656s" podCreationTimestamp="2025-11-25 09:41:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:42:08.13219509 +0000 UTC m=+333.985188466" watchObservedRunningTime="2025-11-25 09:42:21.611863656 +0000 UTC m=+347.464857032" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.616767 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.616818 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.621040 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.636821 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=13.636804867 podStartE2EDuration="13.636804867s" podCreationTimestamp="2025-11-25 09:42:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:42:21.634201138 +0000 UTC m=+347.487194514" watchObservedRunningTime="2025-11-25 09:42:21.636804867 +0000 UTC m=+347.489798243" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.637041 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.728377 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.746370 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 09:42:21 crc kubenswrapper[4854]: I1125 09:42:21.762224 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.065431 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.100119 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.289912 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.399786 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.467474 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.481927 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 
09:42:22.619878 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.627362 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.698868 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.699113 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.811002 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 09:42:22 crc kubenswrapper[4854]: I1125 09:42:22.891901 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.016097 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.100119 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.152648 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.159450 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.229389 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.229424 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.237116 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.280630 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.329428 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.342354 4854 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.356102 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.519730 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-d7gsb" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.605128 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" 
Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.638503 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.640417 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-jffmb" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.684645 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.687258 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.724383 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.742217 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.778126 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.838179 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.846070 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.852834 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.875849 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 09:42:23 crc kubenswrapper[4854]: I1125 09:42:23.896197 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.055653 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.222324 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.228635 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.228770 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.429900 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.451126 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-kmrnp" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.508470 4854 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-monitoring"/"kube-rbac-proxy" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.654594 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.658202 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.676979 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.760402 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.790065 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.806413 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.871318 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.895845 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.929202 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.968736 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.976492 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 09:42:24 crc kubenswrapper[4854]: I1125 09:42:24.990216 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.064501 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.137827 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.153953 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.175896 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.180619 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.248955 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.254744 4854 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.255173 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-drgjn" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.256859 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.366051 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.405308 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.429003 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.440810 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.539223 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.545291 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.546484 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.546519 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.750550 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.768588 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.847943 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.979905 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 09:42:25 crc kubenswrapper[4854]: I1125 09:42:25.998124 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.005620 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.079454 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.191521 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 09:42:26 crc 
kubenswrapper[4854]: I1125 09:42:26.207503 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.341542 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.478122 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.527868 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.544932 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.547115 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.558890 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.644280 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.731987 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.765626 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.859764 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.864178 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 09:42:26 crc kubenswrapper[4854]: I1125 09:42:26.954522 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.001041 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.002524 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.008899 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.085422 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.097754 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.157366 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.217021 
4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.226613 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.257256 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.272058 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.285478 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.303204 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.378573 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.381246 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.442470 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.454263 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.466481 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.524564 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-28ohpcjhsv8th" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.563124 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.563567 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.614032 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.656265 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.741064 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 09:42:27 crc kubenswrapper[4854]: I1125 09:42:27.972479 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.149024 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.156760 4854 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.166170 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.197372 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.221633 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.253655 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.313852 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.460162 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.540215 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.576733 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.627570 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.631913 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.666985 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.694486 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.748101 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.804051 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.831239 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.846605 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.861875 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.894329 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.901931 4854 reflector.go:368] Caches populated for *v1.Node from 
k8s.io/client-go/informers/factory.go:160 Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.914290 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:42:28 crc kubenswrapper[4854]: I1125 09:42:28.915623 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.112077 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.119713 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.184666 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.191635 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.193716 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.205951 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.222889 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.224396 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.251022 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.296194 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.317316 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.338037 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.496139 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.551558 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.553779 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.623080 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.677922 4854 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.687142 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.690888 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.717866 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.761915 4854 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.762017 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.762093 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.763063 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"de27b1ae4d5eadf1bb59fa7e6ffaa41880feb6f0bb2e835e7e8754b82966f217"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.763233 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://de27b1ae4d5eadf1bb59fa7e6ffaa41880feb6f0bb2e835e7e8754b82966f217" gracePeriod=30 Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.873283 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.908028 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.919567 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-rkdk7" Nov 25 09:42:29 crc kubenswrapper[4854]: I1125 09:42:29.998046 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.080269 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.126279 4854 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console"/"service-ca" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.178182 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.284264 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.324516 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.377429 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.401792 4854 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.452939 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.513834 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.538291 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.544069 4854 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.544305 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07" gracePeriod=5 Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.606462 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.610077 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-ttgv5" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.632856 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.641142 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.661358 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.666535 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.682999 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.727014 4854 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-dns"/"kube-root-ca.crt" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.788320 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.828360 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.846206 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.854960 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.857177 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.859386 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.860657 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.872828 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.899265 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.925758 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.940621 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.961137 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Nov 25 09:42:30 crc kubenswrapper[4854]: I1125 09:42:30.994853 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.027378 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.052726 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.218065 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.224297 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.316594 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.379941 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 
09:42:31.381312 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.410633 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.493504 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.568301 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.580186 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.709164 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.790681 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.803103 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.816030 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 09:42:31 crc kubenswrapper[4854]: I1125 09:42:31.929586 4854 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.008835 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.023699 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.042982 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.132645 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.171986 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.256497 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.270379 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.331228 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.337902 4854 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.395231 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.562160 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.574153 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.606413 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.668549 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.808489 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Nov 25 09:42:32 crc kubenswrapper[4854]: I1125 09:42:32.938640 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.041466 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.043415 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.086987 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.192471 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-31000036s10th" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.203550 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.206409 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.209104 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.431362 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.478116 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.557941 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.640985 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.834951 4854 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.888730 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.942462 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 09:42:33 crc kubenswrapper[4854]: I1125 09:42:33.971315 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.029959 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.068603 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.079373 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.267631 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.313743 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.330805 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-cl6hc" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.354656 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.436848 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.520441 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.549996 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.581025 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.724837 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 09:42:34 crc kubenswrapper[4854]: I1125 09:42:34.836496 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.015268 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.060604 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.246153 4854 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.507197 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.606074 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.699532 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.792247 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.877707 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.887365 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 09:42:35 crc kubenswrapper[4854]: I1125 09:42:35.928035 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.120178 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.120248 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277128 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277251 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277332 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277395 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277463 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277550 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277717 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277728 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.277860 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.278155 4854 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.278188 4854 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.278215 4854 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.278237 4854 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.285020 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.295104 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.295161 4854 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07" exitCode=137 Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.295214 4854 scope.go:117] "RemoveContainer" containerID="968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.295245 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.306692 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.355472 4854 scope.go:117] "RemoveContainer" containerID="968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07" Nov 25 09:42:36 crc kubenswrapper[4854]: E1125 09:42:36.356140 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07\": container with ID starting with 968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07 not found: ID does not exist" containerID="968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.356209 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07"} err="failed to get container status \"968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07\": rpc error: code = NotFound desc = could not find container \"968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07\": container with ID starting with 968b0e5b09ef506df42aba41246c0aa9119b890613348d86e9542ba02a80ec07 not found: ID does not exist" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.378069 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.379229 4854 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.562448 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.619361 4854 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.660313 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 09:42:36 crc kubenswrapper[4854]: I1125 09:42:36.856952 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 09:42:37 crc kubenswrapper[4854]: I1125 09:42:37.029574 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 09:42:37 crc kubenswrapper[4854]: I1125 09:42:37.030035 4854 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 25 09:42:37 crc kubenswrapper[4854]: I1125 09:42:37.044453 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:42:37 crc kubenswrapper[4854]: I1125 09:42:37.044497 4854 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
mirrorPodUID="4e794c66-69d8-4ee7-9eea-0d43eb35b688" Nov 25 09:42:37 crc kubenswrapper[4854]: I1125 09:42:37.051805 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 09:42:37 crc kubenswrapper[4854]: I1125 09:42:37.051878 4854 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="4e794c66-69d8-4ee7-9eea-0d43eb35b688" Nov 25 09:42:55 crc kubenswrapper[4854]: I1125 09:42:55.028600 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:42:55 crc kubenswrapper[4854]: I1125 09:42:55.029312 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:43:00 crc kubenswrapper[4854]: I1125 09:43:00.444776 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 25 09:43:00 crc kubenswrapper[4854]: I1125 09:43:00.446295 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 09:43:00 crc kubenswrapper[4854]: I1125 09:43:00.446342 4854 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="de27b1ae4d5eadf1bb59fa7e6ffaa41880feb6f0bb2e835e7e8754b82966f217" exitCode=137 Nov 25 09:43:00 crc kubenswrapper[4854]: I1125 09:43:00.446372 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"de27b1ae4d5eadf1bb59fa7e6ffaa41880feb6f0bb2e835e7e8754b82966f217"} Nov 25 09:43:00 crc kubenswrapper[4854]: I1125 09:43:00.446401 4854 scope.go:117] "RemoveContainer" containerID="d2735a1da35b4f4892318c2924990dcc66dfc6446e2faafb5acf5eb168a4c116" Nov 25 09:43:01 crc kubenswrapper[4854]: I1125 09:43:01.455447 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 25 09:43:01 crc kubenswrapper[4854]: I1125 09:43:01.457695 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a1e1c82f1a8b58321bcb5bf3f3a90275e87122a91c1d17d1bd725e934f6b5527"} Nov 25 09:43:06 crc kubenswrapper[4854]: I1125 09:43:06.950277 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:43:09 crc kubenswrapper[4854]: I1125 09:43:09.761943 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:43:09 crc kubenswrapper[4854]: I1125 
09:43:09.768626 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:43:10 crc kubenswrapper[4854]: I1125 09:43:10.512689 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.475658 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"] Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.476445 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" podUID="243c3e75-c67a-4dcf-b76d-bc1920af0a41" containerName="route-controller-manager" containerID="cri-o://82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b" gracePeriod=30 Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.483395 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-srrn8"] Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.483648 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" podUID="c8bf226e-44f3-494c-b837-b9e8b9f9904d" containerName="controller-manager" containerID="cri-o://ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533" gracePeriod=30 Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.496963 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7dcb9bcd7d-srrsb"] Nov 25 09:43:17 crc kubenswrapper[4854]: E1125 09:43:17.497193 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.497209 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 09:43:17 crc kubenswrapper[4854]: E1125 09:43:17.497221 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" containerName="installer" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.497227 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" containerName="installer" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.497334 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0b5231b-f1be-4866-9f88-5b54b42a877a" containerName="installer" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.497351 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.497763 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.549311 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dcb9bcd7d-srrsb"] Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.654076 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-console-config\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.654158 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-oauth-serving-cert\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.654252 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-service-ca\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.654285 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-oauth-config\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.654487 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9kb7\" (UniqueName: \"kubernetes.io/projected/2e2bb709-f690-49a8-85e2-c559a83da899-kube-api-access-d9kb7\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.654546 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-serving-cert\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.654578 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-trusted-ca-bundle\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.756413 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-console-config\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:43:17 crc 
kubenswrapper[4854]: I1125 09:43:17.756480 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-oauth-serving-cert\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.756541 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-service-ca\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.756568 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-oauth-config\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.756635 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9kb7\" (UniqueName: \"kubernetes.io/projected/2e2bb709-f690-49a8-85e2-c559a83da899-kube-api-access-d9kb7\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.756664 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-serving-cert\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.756720 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-trusted-ca-bundle\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.758258 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-service-ca\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.758572 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-trusted-ca-bundle\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.759093 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-console-config\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.759458 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-oauth-serving-cert\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.765383 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-serving-cert\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.766776 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-oauth-config\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.781305 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9kb7\" (UniqueName: \"kubernetes.io/projected/2e2bb709-f690-49a8-85e2-c559a83da899-kube-api-access-d9kb7\") pod \"console-7dcb9bcd7d-srrsb\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.815958 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.934944 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"
Nov 25 09:43:17 crc kubenswrapper[4854]: I1125 09:43:17.951469 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061326 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-client-ca\") pod \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061392 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-proxy-ca-bundles\") pod \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061427 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-client-ca\") pod \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061518 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c3e75-c67a-4dcf-b76d-bc1920af0a41-serving-cert\") pod \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061554 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-config\") pod \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061613 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8bf226e-44f3-494c-b837-b9e8b9f9904d-serving-cert\") pod \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061652 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqjw6\" (UniqueName: \"kubernetes.io/projected/c8bf226e-44f3-494c-b837-b9e8b9f9904d-kube-api-access-rqjw6\") pod \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061774 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-config\") pod \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\" (UID: \"c8bf226e-44f3-494c-b837-b9e8b9f9904d\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.061806 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhpmr\" (UniqueName: \"kubernetes.io/projected/243c3e75-c67a-4dcf-b76d-bc1920af0a41-kube-api-access-rhpmr\") pod \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\" (UID: \"243c3e75-c67a-4dcf-b76d-bc1920af0a41\") "
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.063661 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-config" (OuterVolumeSpecName: "config") pod "243c3e75-c67a-4dcf-b76d-bc1920af0a41" (UID: "243c3e75-c67a-4dcf-b76d-bc1920af0a41"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.064632 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c8bf226e-44f3-494c-b837-b9e8b9f9904d" (UID: "c8bf226e-44f3-494c-b837-b9e8b9f9904d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.065040 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-client-ca" (OuterVolumeSpecName: "client-ca") pod "243c3e75-c67a-4dcf-b76d-bc1920af0a41" (UID: "243c3e75-c67a-4dcf-b76d-bc1920af0a41"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.065604 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-client-ca" (OuterVolumeSpecName: "client-ca") pod "c8bf226e-44f3-494c-b837-b9e8b9f9904d" (UID: "c8bf226e-44f3-494c-b837-b9e8b9f9904d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.066137 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/243c3e75-c67a-4dcf-b76d-bc1920af0a41-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "243c3e75-c67a-4dcf-b76d-bc1920af0a41" (UID: "243c3e75-c67a-4dcf-b76d-bc1920af0a41"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.066151 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/243c3e75-c67a-4dcf-b76d-bc1920af0a41-kube-api-access-rhpmr" (OuterVolumeSpecName: "kube-api-access-rhpmr") pod "243c3e75-c67a-4dcf-b76d-bc1920af0a41" (UID: "243c3e75-c67a-4dcf-b76d-bc1920af0a41"). InnerVolumeSpecName "kube-api-access-rhpmr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.066225 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bf226e-44f3-494c-b837-b9e8b9f9904d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c8bf226e-44f3-494c-b837-b9e8b9f9904d" (UID: "c8bf226e-44f3-494c-b837-b9e8b9f9904d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.066234 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-config" (OuterVolumeSpecName: "config") pod "c8bf226e-44f3-494c-b837-b9e8b9f9904d" (UID: "c8bf226e-44f3-494c-b837-b9e8b9f9904d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.068275 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8bf226e-44f3-494c-b837-b9e8b9f9904d-kube-api-access-rqjw6" (OuterVolumeSpecName: "kube-api-access-rqjw6") pod "c8bf226e-44f3-494c-b837-b9e8b9f9904d" (UID: "c8bf226e-44f3-494c-b837-b9e8b9f9904d"). InnerVolumeSpecName "kube-api-access-rqjw6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.080970 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dcb9bcd7d-srrsb"]
Nov 25 09:43:18 crc kubenswrapper[4854]: W1125 09:43:18.083705 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e2bb709_f690_49a8_85e2_c559a83da899.slice/crio-b43cd079dd315e59cb7429d0e313e723cf65dc3c1764d58448851abbbe94b621 WatchSource:0}: Error finding container b43cd079dd315e59cb7429d0e313e723cf65dc3c1764d58448851abbbe94b621: Status 404 returned error can't find the container with id b43cd079dd315e59cb7429d0e313e723cf65dc3c1764d58448851abbbe94b621
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163082 4854 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163115 4854 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163128 4854 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163137 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c3e75-c67a-4dcf-b76d-bc1920af0a41-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163146 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c3e75-c67a-4dcf-b76d-bc1920af0a41-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163154 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8bf226e-44f3-494c-b837-b9e8b9f9904d-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163162 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqjw6\" (UniqueName: \"kubernetes.io/projected/c8bf226e-44f3-494c-b837-b9e8b9f9904d-kube-api-access-rqjw6\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163171 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8bf226e-44f3-494c-b837-b9e8b9f9904d-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.163179 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhpmr\" (UniqueName: \"kubernetes.io/projected/243c3e75-c67a-4dcf-b76d-bc1920af0a41-kube-api-access-rhpmr\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.563470 4854 generic.go:334] "Generic (PLEG): container finished" podID="243c3e75-c67a-4dcf-b76d-bc1920af0a41" containerID="82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b" exitCode=0
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.563550 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" event={"ID":"243c3e75-c67a-4dcf-b76d-bc1920af0a41","Type":"ContainerDied","Data":"82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b"}
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.563576 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx" event={"ID":"243c3e75-c67a-4dcf-b76d-bc1920af0a41","Type":"ContainerDied","Data":"2d8accc69ff98beffee524c86389d9d2d757d6197352176385fc75a24f23aa2a"}
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.563574 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.563593 4854 scope.go:117] "RemoveContainer" containerID="82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.565461 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dcb9bcd7d-srrsb" event={"ID":"2e2bb709-f690-49a8-85e2-c559a83da899","Type":"ContainerStarted","Data":"a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675"}
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.565520 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dcb9bcd7d-srrsb" event={"ID":"2e2bb709-f690-49a8-85e2-c559a83da899","Type":"ContainerStarted","Data":"b43cd079dd315e59cb7429d0e313e723cf65dc3c1764d58448851abbbe94b621"}
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.567636 4854 generic.go:334] "Generic (PLEG): container finished" podID="c8bf226e-44f3-494c-b837-b9e8b9f9904d" containerID="ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533" exitCode=0
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.567731 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" event={"ID":"c8bf226e-44f3-494c-b837-b9e8b9f9904d","Type":"ContainerDied","Data":"ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533"}
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.568151 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8" event={"ID":"c8bf226e-44f3-494c-b837-b9e8b9f9904d","Type":"ContainerDied","Data":"e0f04d7ac1e9c5d5744ceb54b828f3f4179789027a73fd820d67d36fb527e121"}
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.568448 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-srrn8"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.587522 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7dcb9bcd7d-srrsb" podStartSLOduration=1.5875047850000001 podStartE2EDuration="1.587504785s" podCreationTimestamp="2025-11-25 09:43:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:43:18.585578933 +0000 UTC m=+404.438572329" watchObservedRunningTime="2025-11-25 09:43:18.587504785 +0000 UTC m=+404.440498161"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.588463 4854 scope.go:117] "RemoveContainer" containerID="82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b"
Nov 25 09:43:18 crc kubenswrapper[4854]: E1125 09:43:18.588938 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b\": container with ID starting with 82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b not found: ID does not exist" containerID="82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.588979 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b"} err="failed to get container status \"82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b\": rpc error: code = NotFound desc = could not find container \"82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b\": container with ID starting with 82d3ff152780d17fb41039cd53f5a05b12bd9ecb3cf2ef9e1a13c59669c39f9b not found: ID does not exist"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.589007 4854 scope.go:117] "RemoveContainer" containerID="ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.601657 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"]
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.610709 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-6qzzx"]
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.611283 4854 scope.go:117] "RemoveContainer" containerID="ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533"
Nov 25 09:43:18 crc kubenswrapper[4854]: E1125 09:43:18.611739 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533\": container with ID starting with ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533 not found: ID does not exist" containerID="ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.611775 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533"} err="failed to get container status \"ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533\": rpc error: code = NotFound desc = could not find container \"ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533\": container with ID starting with ef0a4591bbf135c9d7c65161501e1548235260dce2379c2a7298441b4d5b4533 not found: ID does not exist"
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.622838 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-srrn8"]
Nov 25 09:43:18 crc kubenswrapper[4854]: I1125 09:43:18.627870 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-srrn8"]
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.020351 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="243c3e75-c67a-4dcf-b76d-bc1920af0a41" path="/var/lib/kubelet/pods/243c3e75-c67a-4dcf-b76d-bc1920af0a41/volumes"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.021231 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8bf226e-44f3-494c-b837-b9e8b9f9904d" path="/var/lib/kubelet/pods/c8bf226e-44f3-494c-b837-b9e8b9f9904d/volumes"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.257103 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"]
Nov 25 09:43:19 crc kubenswrapper[4854]: E1125 09:43:19.257753 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8bf226e-44f3-494c-b837-b9e8b9f9904d" containerName="controller-manager"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.257781 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8bf226e-44f3-494c-b837-b9e8b9f9904d" containerName="controller-manager"
Nov 25 09:43:19 crc kubenswrapper[4854]: E1125 09:43:19.257803 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="243c3e75-c67a-4dcf-b76d-bc1920af0a41" containerName="route-controller-manager"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.257813 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="243c3e75-c67a-4dcf-b76d-bc1920af0a41" containerName="route-controller-manager"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.257993 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8bf226e-44f3-494c-b837-b9e8b9f9904d" containerName="controller-manager"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.258016 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="243c3e75-c67a-4dcf-b76d-bc1920af0a41" containerName="route-controller-manager"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.258935 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.262596 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"]
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.263115 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.263207 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.263976 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.264241 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.264511 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.264687 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.266304 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.267265 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.268063 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.269406 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.269602 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.269768 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.270234 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.274904 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"]
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.276192 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.279871 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"]
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381113 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a03873ea-e353-4689-bc6d-fd108febb0e6-serving-cert\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381230 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x7s8\" (UniqueName: \"kubernetes.io/projected/a03873ea-e353-4689-bc6d-fd108febb0e6-kube-api-access-6x7s8\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381294 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-config\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381321 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a03873ea-e353-4689-bc6d-fd108febb0e6-config\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381372 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bfcd\" (UniqueName: \"kubernetes.io/projected/3800761e-e06c-4570-bf59-30d5ac8a1d5e-kube-api-access-7bfcd\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381406 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-proxy-ca-bundles\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381453 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3800761e-e06c-4570-bf59-30d5ac8a1d5e-serving-cert\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381601 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a03873ea-e353-4689-bc6d-fd108febb0e6-client-ca\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.381686 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-client-ca\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483523 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a03873ea-e353-4689-bc6d-fd108febb0e6-client-ca\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483591 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-client-ca\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483639 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a03873ea-e353-4689-bc6d-fd108febb0e6-serving-cert\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483690 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x7s8\" (UniqueName: \"kubernetes.io/projected/a03873ea-e353-4689-bc6d-fd108febb0e6-kube-api-access-6x7s8\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483725 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-config\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483748 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a03873ea-e353-4689-bc6d-fd108febb0e6-config\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483796 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bfcd\" (UniqueName: \"kubernetes.io/projected/3800761e-e06c-4570-bf59-30d5ac8a1d5e-kube-api-access-7bfcd\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483827 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-proxy-ca-bundles\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.483875 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3800761e-e06c-4570-bf59-30d5ac8a1d5e-serving-cert\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.484966 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-client-ca\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.484973 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a03873ea-e353-4689-bc6d-fd108febb0e6-client-ca\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.485084 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-proxy-ca-bundles\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.485994 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a03873ea-e353-4689-bc6d-fd108febb0e6-config\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.486333 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-config\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.488735 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a03873ea-e353-4689-bc6d-fd108febb0e6-serving-cert\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.488896 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3800761e-e06c-4570-bf59-30d5ac8a1d5e-serving-cert\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.501845 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x7s8\" (UniqueName: \"kubernetes.io/projected/a03873ea-e353-4689-bc6d-fd108febb0e6-kube-api-access-6x7s8\") pod \"route-controller-manager-669b554bf5-gr9jc\" (UID: \"a03873ea-e353-4689-bc6d-fd108febb0e6\") " pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.502639 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bfcd\" (UniqueName: \"kubernetes.io/projected/3800761e-e06c-4570-bf59-30d5ac8a1d5e-kube-api-access-7bfcd\") pod \"controller-manager-5fb7b98d56-dkgpn\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") " pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.583250 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.592716 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.807190 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"]
Nov 25 09:43:19 crc kubenswrapper[4854]: I1125 09:43:19.859623 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"]
Nov 25 09:43:19 crc kubenswrapper[4854]: W1125 09:43:19.872018 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda03873ea_e353_4689_bc6d_fd108febb0e6.slice/crio-1575e0c8a8d36b2b7cfafaacad0c4b7e3b25333ee49be1fe676a57c9bf29a049 WatchSource:0}: Error finding container 1575e0c8a8d36b2b7cfafaacad0c4b7e3b25333ee49be1fe676a57c9bf29a049: Status 404 returned error can't find the container with id 1575e0c8a8d36b2b7cfafaacad0c4b7e3b25333ee49be1fe676a57c9bf29a049
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.583478 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn" event={"ID":"3800761e-e06c-4570-bf59-30d5ac8a1d5e","Type":"ContainerStarted","Data":"da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79"}
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.583838 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn" event={"ID":"3800761e-e06c-4570-bf59-30d5ac8a1d5e","Type":"ContainerStarted","Data":"f0f1d2f1f5e95ef343de0e9f37d1b68ef7e89b87730c8df9c782e01ce532d921"}
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.583858 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.584858 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc" event={"ID":"a03873ea-e353-4689-bc6d-fd108febb0e6","Type":"ContainerStarted","Data":"4af17edc14bc5736c761db6415cf719347e5599e27c377c6dc7318565e44bb1a"}
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.584889 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc" event={"ID":"a03873ea-e353-4689-bc6d-fd108febb0e6","Type":"ContainerStarted","Data":"1575e0c8a8d36b2b7cfafaacad0c4b7e3b25333ee49be1fe676a57c9bf29a049"}
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.585102 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.589698 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc"
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.590333 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.600638 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn" podStartSLOduration=3.600621391 podStartE2EDuration="3.600621391s" podCreationTimestamp="2025-11-25 09:43:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:43:20.599959203 +0000 UTC m=+406.452952599" watchObservedRunningTime="2025-11-25 09:43:20.600621391 +0000 UTC m=+406.453614767"
Nov 25 09:43:20 crc kubenswrapper[4854]: I1125 09:43:20.627905 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-669b554bf5-gr9jc" podStartSLOduration=3.627888552 podStartE2EDuration="3.627888552s" podCreationTimestamp="2025-11-25 09:43:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:43:20.627101221 +0000 UTC m=+406.480094607" watchObservedRunningTime="2025-11-25 09:43:20.627888552 +0000 UTC m=+406.480881928"
Nov 25 09:43:25 crc kubenswrapper[4854]: I1125 09:43:25.029000 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:43:25 crc kubenswrapper[4854]: I1125 09:43:25.029544 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:43:27 crc kubenswrapper[4854]: I1125 09:43:27.816080 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:27 crc kubenswrapper[4854]: I1125 09:43:27.817338 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:27 crc kubenswrapper[4854]: I1125 09:43:27.820905 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:28 crc kubenswrapper[4854]: I1125 09:43:28.631135 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7dcb9bcd7d-srrsb"
Nov 25 09:43:28 crc kubenswrapper[4854]: I1125 09:43:28.679367 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7796957d69-r8tsd"]
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.140347 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"]
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.141170 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn" podUID="3800761e-e06c-4570-bf59-30d5ac8a1d5e" containerName="controller-manager" containerID="cri-o://da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79" gracePeriod=30
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.676360 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.705576 4854 generic.go:334] "Generic (PLEG): container finished" podID="3800761e-e06c-4570-bf59-30d5ac8a1d5e" containerID="da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79" exitCode=0
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.705618 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn" event={"ID":"3800761e-e06c-4570-bf59-30d5ac8a1d5e","Type":"ContainerDied","Data":"da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79"}
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.705645 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn" event={"ID":"3800761e-e06c-4570-bf59-30d5ac8a1d5e","Type":"ContainerDied","Data":"f0f1d2f1f5e95ef343de0e9f37d1b68ef7e89b87730c8df9c782e01ce532d921"}
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.705644 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.705661 4854 scope.go:117] "RemoveContainer" containerID="da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79"
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.722743 4854 scope.go:117] "RemoveContainer" containerID="da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79"
Nov 25 09:43:42 crc kubenswrapper[4854]: E1125 09:43:42.723307 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79\": container with ID starting with da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79 not found: ID does not exist" containerID="da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79"
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.723348 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79"} err="failed to get container status \"da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79\": rpc error: code = NotFound desc = could not find container \"da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79\": container with ID starting with da9a91a80bd106cf914d86c6724a4fe4906f4bab2359b8484956474d90565b79 not found: ID does not exist"
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.813606 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3800761e-e06c-4570-bf59-30d5ac8a1d5e-serving-cert\") pod \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") "
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.813718 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-config\") pod \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") "
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.813777 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bfcd\" (UniqueName: \"kubernetes.io/projected/3800761e-e06c-4570-bf59-30d5ac8a1d5e-kube-api-access-7bfcd\") pod \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") "
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.813797 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-client-ca\") pod \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") "
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.813834 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-proxy-ca-bundles\") pod \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\" (UID: \"3800761e-e06c-4570-bf59-30d5ac8a1d5e\") "
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.814712 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3800761e-e06c-4570-bf59-30d5ac8a1d5e" (UID: "3800761e-e06c-4570-bf59-30d5ac8a1d5e"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.814754 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-config" (OuterVolumeSpecName: "config") pod "3800761e-e06c-4570-bf59-30d5ac8a1d5e" (UID: "3800761e-e06c-4570-bf59-30d5ac8a1d5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.815300 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-client-ca" (OuterVolumeSpecName: "client-ca") pod "3800761e-e06c-4570-bf59-30d5ac8a1d5e" (UID: "3800761e-e06c-4570-bf59-30d5ac8a1d5e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.820766 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3800761e-e06c-4570-bf59-30d5ac8a1d5e-kube-api-access-7bfcd" (OuterVolumeSpecName: "kube-api-access-7bfcd") pod "3800761e-e06c-4570-bf59-30d5ac8a1d5e" (UID: "3800761e-e06c-4570-bf59-30d5ac8a1d5e"). InnerVolumeSpecName "kube-api-access-7bfcd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.822368 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3800761e-e06c-4570-bf59-30d5ac8a1d5e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3800761e-e06c-4570-bf59-30d5ac8a1d5e" (UID: "3800761e-e06c-4570-bf59-30d5ac8a1d5e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.915599 4854 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3800761e-e06c-4570-bf59-30d5ac8a1d5e-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.915642 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.915654 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bfcd\" (UniqueName: \"kubernetes.io/projected/3800761e-e06c-4570-bf59-30d5ac8a1d5e-kube-api-access-7bfcd\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.915680 4854 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:42 crc kubenswrapper[4854]: I1125 09:43:42.915691 4854 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3800761e-e06c-4570-bf59-30d5ac8a1d5e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.049136 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"]
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.052633 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5fb7b98d56-dkgpn"]
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.271721 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-656c84567c-m62wt"]
Nov 25 09:43:43 crc kubenswrapper[4854]: E1125 09:43:43.272020 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3800761e-e06c-4570-bf59-30d5ac8a1d5e" containerName="controller-manager"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.272037 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3800761e-e06c-4570-bf59-30d5ac8a1d5e" containerName="controller-manager"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.272214 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="3800761e-e06c-4570-bf59-30d5ac8a1d5e" containerName="controller-manager"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.272797 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.277034 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.277235 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.277568 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.278646 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.279545 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.280247 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.285477 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-656c84567c-m62wt"]
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.288207 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.424280 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm6kb\" (UniqueName: \"kubernetes.io/projected/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-kube-api-access-xm6kb\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.424402 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-config\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.424433 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-serving-cert\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.424512 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-proxy-ca-bundles\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.424544 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-client-ca\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.525889 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-config\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.525945 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-serving-cert\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.526038 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-proxy-ca-bundles\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.526085 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-client-ca\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.526118 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm6kb\" (UniqueName: \"kubernetes.io/projected/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-kube-api-access-xm6kb\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.527368 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-proxy-ca-bundles\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.527430 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-client-ca\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.528237 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-config\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.530434 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-serving-cert\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.546371 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm6kb\" (UniqueName: \"kubernetes.io/projected/ad0dfddc-69c3-428a-bea1-0ed6e1729cdc-kube-api-access-xm6kb\") pod \"controller-manager-656c84567c-m62wt\" (UID: \"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc\") " pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:43 crc kubenswrapper[4854]: I1125 09:43:43.606861 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:44 crc kubenswrapper[4854]: I1125 09:43:44.009049 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-656c84567c-m62wt"]
Nov 25 09:43:44 crc kubenswrapper[4854]: I1125 09:43:44.729094 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-656c84567c-m62wt" event={"ID":"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc","Type":"ContainerStarted","Data":"4c64043448f9a88e55a4877224d3c13ccb3a143b398c87a26cf43715da706f7c"}
Nov 25 09:43:44 crc kubenswrapper[4854]: I1125 09:43:44.729392 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-656c84567c-m62wt" event={"ID":"ad0dfddc-69c3-428a-bea1-0ed6e1729cdc","Type":"ContainerStarted","Data":"6707d53d189246d86602b7200e628a4db79ebb5e6309e3f913dd0eb09fabf75d"}
Nov 25 09:43:44 crc kubenswrapper[4854]: I1125 09:43:44.730487 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:44 crc kubenswrapper[4854]: I1125 09:43:44.736601 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-656c84567c-m62wt"
Nov 25 09:43:44 crc kubenswrapper[4854]: I1125 09:43:44.746824 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-656c84567c-m62wt" podStartSLOduration=2.746803562 podStartE2EDuration="2.746803562s" podCreationTimestamp="2025-11-25 09:43:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:43:44.744964223 +0000 UTC m=+430.597957599" watchObservedRunningTime="2025-11-25 09:43:44.746803562 +0000 UTC m=+430.599796938"
Nov 25 09:43:45 crc kubenswrapper[4854]: I1125 09:43:45.020348 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3800761e-e06c-4570-bf59-30d5ac8a1d5e" path="/var/lib/kubelet/pods/3800761e-e06c-4570-bf59-30d5ac8a1d5e/volumes"
Nov 25 09:43:53 crc kubenswrapper[4854]: I1125 09:43:53.722908 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-7796957d69-r8tsd" podUID="04cd4375-6fa3-45f3-947a-e5ca050d03ab" containerName="console" containerID="cri-o://0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc" gracePeriod=15
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.176868 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7796957d69-r8tsd_04cd4375-6fa3-45f3-947a-e5ca050d03ab/console/0.log"
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.177186 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7796957d69-r8tsd"
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.372262 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-config\") pod \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") "
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.372442 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-service-ca\") pod \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") "
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.372476 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-serving-cert\") pod \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") "
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.372549 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-oauth-config\") pod \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") "
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.372572 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqdwm\" (UniqueName: \"kubernetes.io/projected/04cd4375-6fa3-45f3-947a-e5ca050d03ab-kube-api-access-pqdwm\") pod \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") "
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.374341 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-oauth-serving-cert\") pod \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") "
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.374386 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-trusted-ca-bundle\") pod \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\" (UID: \"04cd4375-6fa3-45f3-947a-e5ca050d03ab\") "
Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.373071 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-service-ca" (OuterVolumeSpecName: "service-ca") pod "04cd4375-6fa3-45f3-947a-e5ca050d03ab" (UID: "04cd4375-6fa3-45f3-947a-e5ca050d03ab"). InnerVolumeSpecName "service-ca".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.373460 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-config" (OuterVolumeSpecName: "console-config") pod "04cd4375-6fa3-45f3-947a-e5ca050d03ab" (UID: "04cd4375-6fa3-45f3-947a-e5ca050d03ab"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.375383 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "04cd4375-6fa3-45f3-947a-e5ca050d03ab" (UID: "04cd4375-6fa3-45f3-947a-e5ca050d03ab"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.375471 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "04cd4375-6fa3-45f3-947a-e5ca050d03ab" (UID: "04cd4375-6fa3-45f3-947a-e5ca050d03ab"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.379001 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "04cd4375-6fa3-45f3-947a-e5ca050d03ab" (UID: "04cd4375-6fa3-45f3-947a-e5ca050d03ab"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.383939 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "04cd4375-6fa3-45f3-947a-e5ca050d03ab" (UID: "04cd4375-6fa3-45f3-947a-e5ca050d03ab"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.383974 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04cd4375-6fa3-45f3-947a-e5ca050d03ab-kube-api-access-pqdwm" (OuterVolumeSpecName: "kube-api-access-pqdwm") pod "04cd4375-6fa3-45f3-947a-e5ca050d03ab" (UID: "04cd4375-6fa3-45f3-947a-e5ca050d03ab"). InnerVolumeSpecName "kube-api-access-pqdwm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.475803 4854 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.475843 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.475858 4854 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.475871 4854 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04cd4375-6fa3-45f3-947a-e5ca050d03ab-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.475886 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqdwm\" (UniqueName: \"kubernetes.io/projected/04cd4375-6fa3-45f3-947a-e5ca050d03ab-kube-api-access-pqdwm\") on node \"crc\" DevicePath \"\"" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.475899 4854 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.475906 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04cd4375-6fa3-45f3-947a-e5ca050d03ab-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.784868 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7796957d69-r8tsd_04cd4375-6fa3-45f3-947a-e5ca050d03ab/console/0.log" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.784920 4854 generic.go:334] "Generic (PLEG): container finished" podID="04cd4375-6fa3-45f3-947a-e5ca050d03ab" containerID="0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc" exitCode=2 Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.784944 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7796957d69-r8tsd" event={"ID":"04cd4375-6fa3-45f3-947a-e5ca050d03ab","Type":"ContainerDied","Data":"0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc"} Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.784971 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7796957d69-r8tsd" event={"ID":"04cd4375-6fa3-45f3-947a-e5ca050d03ab","Type":"ContainerDied","Data":"ea7e180bd3e5d45b024d2f848a4869c29d39bc0ce98df224747673b6c418d620"} Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.784987 4854 scope.go:117] "RemoveContainer" containerID="0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.785000 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7796957d69-r8tsd" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.801467 4854 scope.go:117] "RemoveContainer" containerID="0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc" Nov 25 09:43:54 crc kubenswrapper[4854]: E1125 09:43:54.802007 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc\": container with ID starting with 0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc not found: ID does not exist" containerID="0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.802045 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc"} err="failed to get container status \"0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc\": rpc error: code = NotFound desc = could not find container \"0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc\": container with ID starting with 0eb44c4ead739d4bcffa1f11f145ae1a225284764b69c00b22e7593a8a21e0fc not found: ID does not exist" Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.811399 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7796957d69-r8tsd"] Nov 25 09:43:54 crc kubenswrapper[4854]: I1125 09:43:54.815143 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-7796957d69-r8tsd"] Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.023089 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04cd4375-6fa3-45f3-947a-e5ca050d03ab" path="/var/lib/kubelet/pods/04cd4375-6fa3-45f3-947a-e5ca050d03ab/volumes" Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.029565 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.029629 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.029693 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.030276 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5e4500c83ec36e72ad24d0c5585cfb6c0ce2eeda56eaac68cb46d27b8d832338"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.030332 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" 
containerName="machine-config-daemon" containerID="cri-o://5e4500c83ec36e72ad24d0c5585cfb6c0ce2eeda56eaac68cb46d27b8d832338" gracePeriod=600 Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.794877 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="5e4500c83ec36e72ad24d0c5585cfb6c0ce2eeda56eaac68cb46d27b8d832338" exitCode=0 Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.795469 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"5e4500c83ec36e72ad24d0c5585cfb6c0ce2eeda56eaac68cb46d27b8d832338"} Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.795532 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"c31858ec557866c59816aa8a24974a2c5810f8450caa75c3ab5f3a290cd6674c"} Nov 25 09:43:55 crc kubenswrapper[4854]: I1125 09:43:55.795554 4854 scope.go:117] "RemoveContainer" containerID="0e898d302f42097c6c149260d69f6cdc0bc4088e1b86714c3344a375b16cd7a9" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.198608 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z"] Nov 25 09:45:00 crc kubenswrapper[4854]: E1125 09:45:00.199615 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04cd4375-6fa3-45f3-947a-e5ca050d03ab" containerName="console" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.199637 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="04cd4375-6fa3-45f3-947a-e5ca050d03ab" containerName="console" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.199885 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="04cd4375-6fa3-45f3-947a-e5ca050d03ab" containerName="console" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.200553 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.204655 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.204659 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.208076 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z"] Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.286106 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5f9n\" (UniqueName: \"kubernetes.io/projected/5d8869a0-3da4-4d51-8027-f63d7999f409-kube-api-access-l5f9n\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.286155 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d8869a0-3da4-4d51-8027-f63d7999f409-config-volume\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.286170 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d8869a0-3da4-4d51-8027-f63d7999f409-secret-volume\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.388117 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5f9n\" (UniqueName: \"kubernetes.io/projected/5d8869a0-3da4-4d51-8027-f63d7999f409-kube-api-access-l5f9n\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.388220 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d8869a0-3da4-4d51-8027-f63d7999f409-config-volume\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.388255 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d8869a0-3da4-4d51-8027-f63d7999f409-secret-volume\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.389539 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d8869a0-3da4-4d51-8027-f63d7999f409-config-volume\") pod 
\"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.396212 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d8869a0-3da4-4d51-8027-f63d7999f409-secret-volume\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.408876 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5f9n\" (UniqueName: \"kubernetes.io/projected/5d8869a0-3da4-4d51-8027-f63d7999f409-kube-api-access-l5f9n\") pod \"collect-profiles-29401065-8mh5z\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.533024 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:00 crc kubenswrapper[4854]: I1125 09:45:00.758569 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z"] Nov 25 09:45:01 crc kubenswrapper[4854]: I1125 09:45:01.240366 4854 generic.go:334] "Generic (PLEG): container finished" podID="5d8869a0-3da4-4d51-8027-f63d7999f409" containerID="688680df371a53c6c53376e5208f914e84bf749bb5c249ca9df10fe1ed3c6871" exitCode=0 Nov 25 09:45:01 crc kubenswrapper[4854]: I1125 09:45:01.240429 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" event={"ID":"5d8869a0-3da4-4d51-8027-f63d7999f409","Type":"ContainerDied","Data":"688680df371a53c6c53376e5208f914e84bf749bb5c249ca9df10fe1ed3c6871"} Nov 25 09:45:01 crc kubenswrapper[4854]: I1125 09:45:01.240722 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" event={"ID":"5d8869a0-3da4-4d51-8027-f63d7999f409","Type":"ContainerStarted","Data":"6067255e1e88217b27cc756ae16324178027a05a181013b44df9bb9d9d01c632"} Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.579296 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.619607 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d8869a0-3da4-4d51-8027-f63d7999f409-config-volume\") pod \"5d8869a0-3da4-4d51-8027-f63d7999f409\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.619741 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d8869a0-3da4-4d51-8027-f63d7999f409-secret-volume\") pod \"5d8869a0-3da4-4d51-8027-f63d7999f409\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.619802 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5f9n\" (UniqueName: \"kubernetes.io/projected/5d8869a0-3da4-4d51-8027-f63d7999f409-kube-api-access-l5f9n\") pod \"5d8869a0-3da4-4d51-8027-f63d7999f409\" (UID: \"5d8869a0-3da4-4d51-8027-f63d7999f409\") " Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.620769 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d8869a0-3da4-4d51-8027-f63d7999f409-config-volume" (OuterVolumeSpecName: "config-volume") pod "5d8869a0-3da4-4d51-8027-f63d7999f409" (UID: "5d8869a0-3da4-4d51-8027-f63d7999f409"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.625599 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d8869a0-3da4-4d51-8027-f63d7999f409-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5d8869a0-3da4-4d51-8027-f63d7999f409" (UID: "5d8869a0-3da4-4d51-8027-f63d7999f409"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.626278 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d8869a0-3da4-4d51-8027-f63d7999f409-kube-api-access-l5f9n" (OuterVolumeSpecName: "kube-api-access-l5f9n") pod "5d8869a0-3da4-4d51-8027-f63d7999f409" (UID: "5d8869a0-3da4-4d51-8027-f63d7999f409"). InnerVolumeSpecName "kube-api-access-l5f9n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.720804 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5f9n\" (UniqueName: \"kubernetes.io/projected/5d8869a0-3da4-4d51-8027-f63d7999f409-kube-api-access-l5f9n\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.721078 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d8869a0-3da4-4d51-8027-f63d7999f409-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:02 crc kubenswrapper[4854]: I1125 09:45:02.721182 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d8869a0-3da4-4d51-8027-f63d7999f409-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4854]: I1125 09:45:03.254578 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" event={"ID":"5d8869a0-3da4-4d51-8027-f63d7999f409","Type":"ContainerDied","Data":"6067255e1e88217b27cc756ae16324178027a05a181013b44df9bb9d9d01c632"} Nov 25 09:45:03 crc kubenswrapper[4854]: I1125 09:45:03.254627 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6067255e1e88217b27cc756ae16324178027a05a181013b44df9bb9d9d01c632" Nov 25 09:45:03 crc kubenswrapper[4854]: I1125 09:45:03.254692 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z" Nov 25 09:45:55 crc kubenswrapper[4854]: I1125 09:45:55.029187 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:45:55 crc kubenswrapper[4854]: I1125 09:45:55.029898 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:46:25 crc kubenswrapper[4854]: I1125 09:46:25.029319 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:46:25 crc kubenswrapper[4854]: I1125 09:46:25.030079 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.029198 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 
09:46:55.029754 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.029813 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.030484 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c31858ec557866c59816aa8a24974a2c5810f8450caa75c3ab5f3a290cd6674c"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.030557 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://c31858ec557866c59816aa8a24974a2c5810f8450caa75c3ab5f3a290cd6674c" gracePeriod=600 Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.114436 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l"] Nov 25 09:46:55 crc kubenswrapper[4854]: E1125 09:46:55.114813 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d8869a0-3da4-4d51-8027-f63d7999f409" containerName="collect-profiles" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.114834 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d8869a0-3da4-4d51-8027-f63d7999f409" containerName="collect-profiles" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.114970 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d8869a0-3da4-4d51-8027-f63d7999f409" containerName="collect-profiles" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.115996 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.119628 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.125407 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l"] Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.235042 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.235152 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dsbn\" (UniqueName: \"kubernetes.io/projected/382f3712-78a8-4c24-bebc-530f145931e6-kube-api-access-2dsbn\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.235198 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.337282 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dsbn\" (UniqueName: \"kubernetes.io/projected/382f3712-78a8-4c24-bebc-530f145931e6-kube-api-access-2dsbn\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.337350 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.337466 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.337992 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.338505 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.358867 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dsbn\" (UniqueName: \"kubernetes.io/projected/382f3712-78a8-4c24-bebc-530f145931e6-kube-api-access-2dsbn\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.437413 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.630840 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l"] Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.943043 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" event={"ID":"382f3712-78a8-4c24-bebc-530f145931e6","Type":"ContainerStarted","Data":"cc88f7f49ae46c272db49e16f4365e50f7eacb362ec5300e25003d361ba2a25a"} Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.943427 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" event={"ID":"382f3712-78a8-4c24-bebc-530f145931e6","Type":"ContainerStarted","Data":"f58ba696c496ce161f811366bbe6a5089d370ad54d1ea38ce5da5e803e9e3e87"} Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.946177 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="c31858ec557866c59816aa8a24974a2c5810f8450caa75c3ab5f3a290cd6674c" exitCode=0 Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.946248 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"c31858ec557866c59816aa8a24974a2c5810f8450caa75c3ab5f3a290cd6674c"} Nov 25 09:46:55 crc kubenswrapper[4854]: I1125 09:46:55.946390 4854 scope.go:117] "RemoveContainer" containerID="5e4500c83ec36e72ad24d0c5585cfb6c0ce2eeda56eaac68cb46d27b8d832338" Nov 25 09:46:56 crc kubenswrapper[4854]: I1125 09:46:56.959514 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"9b86f8830130949aa485656adb170193fb5c4c66ab6f65d45cd6ab7997ce2f21"} Nov 25 09:46:57 crc kubenswrapper[4854]: I1125 09:46:57.971234 4854 
generic.go:334] "Generic (PLEG): container finished" podID="382f3712-78a8-4c24-bebc-530f145931e6" containerID="cc88f7f49ae46c272db49e16f4365e50f7eacb362ec5300e25003d361ba2a25a" exitCode=0 Nov 25 09:46:57 crc kubenswrapper[4854]: I1125 09:46:57.971365 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" event={"ID":"382f3712-78a8-4c24-bebc-530f145931e6","Type":"ContainerDied","Data":"cc88f7f49ae46c272db49e16f4365e50f7eacb362ec5300e25003d361ba2a25a"} Nov 25 09:46:57 crc kubenswrapper[4854]: I1125 09:46:57.979129 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:47:00 crc kubenswrapper[4854]: I1125 09:47:00.997542 4854 generic.go:334] "Generic (PLEG): container finished" podID="382f3712-78a8-4c24-bebc-530f145931e6" containerID="684cb5652025cac6c932f19311a12aa7c00b2b49c3ac9a4c755dbf8d208c4053" exitCode=0 Nov 25 09:47:00 crc kubenswrapper[4854]: I1125 09:47:00.997635 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" event={"ID":"382f3712-78a8-4c24-bebc-530f145931e6","Type":"ContainerDied","Data":"684cb5652025cac6c932f19311a12aa7c00b2b49c3ac9a4c755dbf8d208c4053"} Nov 25 09:47:02 crc kubenswrapper[4854]: I1125 09:47:02.007343 4854 generic.go:334] "Generic (PLEG): container finished" podID="382f3712-78a8-4c24-bebc-530f145931e6" containerID="9fc32e3aa9491d4332947f6bae42fcbee22c6c12e371d6043e66624d32b47e02" exitCode=0 Nov 25 09:47:02 crc kubenswrapper[4854]: I1125 09:47:02.007425 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" event={"ID":"382f3712-78a8-4c24-bebc-530f145931e6","Type":"ContainerDied","Data":"9fc32e3aa9491d4332947f6bae42fcbee22c6c12e371d6043e66624d32b47e02"} Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.247562 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.400734 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-util\") pod \"382f3712-78a8-4c24-bebc-530f145931e6\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.401067 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dsbn\" (UniqueName: \"kubernetes.io/projected/382f3712-78a8-4c24-bebc-530f145931e6-kube-api-access-2dsbn\") pod \"382f3712-78a8-4c24-bebc-530f145931e6\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.401144 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-bundle\") pod \"382f3712-78a8-4c24-bebc-530f145931e6\" (UID: \"382f3712-78a8-4c24-bebc-530f145931e6\") " Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.402963 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-bundle" (OuterVolumeSpecName: "bundle") pod "382f3712-78a8-4c24-bebc-530f145931e6" (UID: "382f3712-78a8-4c24-bebc-530f145931e6"). 
InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.410261 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-util" (OuterVolumeSpecName: "util") pod "382f3712-78a8-4c24-bebc-530f145931e6" (UID: "382f3712-78a8-4c24-bebc-530f145931e6"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.415945 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/382f3712-78a8-4c24-bebc-530f145931e6-kube-api-access-2dsbn" (OuterVolumeSpecName: "kube-api-access-2dsbn") pod "382f3712-78a8-4c24-bebc-530f145931e6" (UID: "382f3712-78a8-4c24-bebc-530f145931e6"). InnerVolumeSpecName "kube-api-access-2dsbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.502921 4854 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.503209 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dsbn\" (UniqueName: \"kubernetes.io/projected/382f3712-78a8-4c24-bebc-530f145931e6-kube-api-access-2dsbn\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:03 crc kubenswrapper[4854]: I1125 09:47:03.503281 4854 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/382f3712-78a8-4c24-bebc-530f145931e6-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:04 crc kubenswrapper[4854]: I1125 09:47:04.023541 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" event={"ID":"382f3712-78a8-4c24-bebc-530f145931e6","Type":"ContainerDied","Data":"f58ba696c496ce161f811366bbe6a5089d370ad54d1ea38ce5da5e803e9e3e87"} Nov 25 09:47:04 crc kubenswrapper[4854]: I1125 09:47:04.023622 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f58ba696c496ce161f811366bbe6a5089d370ad54d1ea38ce5da5e803e9e3e87" Nov 25 09:47:04 crc kubenswrapper[4854]: I1125 09:47:04.023971 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l" Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.365771 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gt7mq"] Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.368079 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-controller" containerID="cri-o://6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.368130 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="sbdb" containerID="cri-o://50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.368133 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.368210 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-acl-logging" containerID="cri-o://225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.368245 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="nbdb" containerID="cri-o://e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.368115 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-node" containerID="cri-o://b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.368100 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="northd" containerID="cri-o://eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4854]: I1125 09:47:06.396859 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" containerID="cri-o://6d9f4f27a0dee1b92577e22bdfe67d21b43a1be64519f231492aeb22d0ba2e0b" gracePeriod=30 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.047599 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-txnt5_a0e9f759-2eea-43cd-9e0a-6f149785c431/kube-multus/1.log" Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.048354 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-txnt5_a0e9f759-2eea-43cd-9e0a-6f149785c431/kube-multus/0.log" Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.048400 4854 generic.go:334] "Generic (PLEG): container finished" podID="a0e9f759-2eea-43cd-9e0a-6f149785c431" containerID="5feac493298404321ee4fceca3870193d9a2cb42b9f7d769848893b756653fb9" exitCode=2 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.048469 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-txnt5" event={"ID":"a0e9f759-2eea-43cd-9e0a-6f149785c431","Type":"ContainerDied","Data":"5feac493298404321ee4fceca3870193d9a2cb42b9f7d769848893b756653fb9"} Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.048504 4854 scope.go:117] "RemoveContainer" containerID="703f0c0040e7ee5e54520e325bb2ef83ad76b6667cb92c44b879261693551c20" Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.049046 4854 scope.go:117] "RemoveContainer" containerID="5feac493298404321ee4fceca3870193d9a2cb42b9f7d769848893b756653fb9" Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.049288 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-txnt5_openshift-multus(a0e9f759-2eea-43cd-9e0a-6f149785c431)\"" pod="openshift-multus/multus-txnt5" podUID="a0e9f759-2eea-43cd-9e0a-6f149785c431" Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.050861 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovnkube-controller/2.log" Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.052762 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-acl-logging/0.log" Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.053458 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-controller/0.log" Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.053936 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="6d9f4f27a0dee1b92577e22bdfe67d21b43a1be64519f231492aeb22d0ba2e0b" exitCode=0 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.053956 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b" exitCode=0 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.053966 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b" exitCode=0 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.053975 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b" exitCode=0 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.053982 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83" exitCode=143 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.053990 4854 generic.go:334] "Generic (PLEG): container finished" 
podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755" exitCode=143 Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.054027 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"6d9f4f27a0dee1b92577e22bdfe67d21b43a1be64519f231492aeb22d0ba2e0b"} Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.054079 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b"} Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.054090 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b"} Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.054100 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b"} Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.054109 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83"} Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.054119 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755"} Nov 25 09:47:07 crc kubenswrapper[4854]: I1125 09:47:07.109288 4854 scope.go:117] "RemoveContainer" containerID="1a6d7c2acd862f8700bbd5c57584e1b55c5f1da6ff0c8431f1cd400508713d7c" Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.266077 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b is running failed: container process not found" containerID="e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.266105 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b is running failed: container process not found" containerID="50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.266577 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b is running failed: container process not found" containerID="50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.266647 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b is running failed: container process not found" containerID="e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.267248 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b is running failed: container process not found" containerID="50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.267321 4854 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="sbdb" Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.267257 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b is running failed: container process not found" containerID="e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 25 09:47:07 crc kubenswrapper[4854]: E1125 09:47:07.267399 4854 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="nbdb" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.060736 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-txnt5_a0e9f759-2eea-43cd-9e0a-6f149785c431/kube-multus/1.log" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.064765 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-acl-logging/0.log" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.065394 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-controller/0.log" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.065789 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e" exitCode=0 Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.065894 4854 generic.go:334] "Generic (PLEG): container finished" podID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerID="b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec" exitCode=0 Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.065889 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e"} Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.066083 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec"} Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.124335 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-acl-logging/0.log" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.125490 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-controller/0.log" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.126104 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187070 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-var-lib-cni-networks-ovn-kubernetes\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187139 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-script-lib\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187167 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-var-lib-openvswitch\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187195 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r24xl\" (UniqueName: \"kubernetes.io/projected/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-kube-api-access-r24xl\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187185 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187222 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-node-log\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187274 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-node-log" (OuterVolumeSpecName: "node-log") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187305 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-kubelet\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187331 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-etc-openvswitch\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187377 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-env-overrides\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187415 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-slash\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187495 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovn-node-metrics-cert\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187531 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-netns\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187598 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-ovn\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187628 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-netd\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187660 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-log-socket\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187739 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-config\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: 
\"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187782 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-systemd-units\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187815 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-ovn-kubernetes\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187865 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-systemd\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187890 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-openvswitch\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187925 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-bin\") pod \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\" (UID: \"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354\") " Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187305 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.187769 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188705 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188737 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). 
InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188868 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188753 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188768 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188787 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-log-socket" (OuterVolumeSpecName: "log-socket") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188808 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188830 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.188839 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189231 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). 
InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189260 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-slash" (OuterVolumeSpecName: "host-slash") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189289 4854 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189294 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189312 4854 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189326 4854 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189336 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189342 4854 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189374 4854 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189384 4854 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189393 4854 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189401 4854 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189413 4854 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189423 4854 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189433 4854 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189443 4854 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.189455 4854 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.198931 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.206711 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.217343 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-kube-api-access-r24xl" (OuterVolumeSpecName: "kube-api-access-r24xl") pod "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" (UID: "b2e0e8c4-35b0-4ca8-acec-d6c94cf76354"). InnerVolumeSpecName "kube-api-access-r24xl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.224731 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-k4sdl"] Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.224956 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-acl-logging" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.224968 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-acl-logging" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.224979 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.224984 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.224992 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-node" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.224998 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-node" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225005 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="northd" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225012 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="northd" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225023 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kubecfg-setup" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225029 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kubecfg-setup" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225037 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225043 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 
09:47:08.225049 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225054 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225063 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="nbdb" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225068 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="nbdb" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225075 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382f3712-78a8-4c24-bebc-530f145931e6" containerName="pull" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225080 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="382f3712-78a8-4c24-bebc-530f145931e6" containerName="pull" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225088 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382f3712-78a8-4c24-bebc-530f145931e6" containerName="util" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225094 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="382f3712-78a8-4c24-bebc-530f145931e6" containerName="util" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225104 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382f3712-78a8-4c24-bebc-530f145931e6" containerName="extract" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225109 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="382f3712-78a8-4c24-bebc-530f145931e6" containerName="extract" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225117 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225123 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225134 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="sbdb" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225139 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="sbdb" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225147 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225153 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225248 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225258 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225269 4854 
memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225276 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="northd" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225283 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-acl-logging" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225291 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="382f3712-78a8-4c24-bebc-530f145931e6" containerName="extract" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225299 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovn-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225307 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225312 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="nbdb" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225320 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="sbdb" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225325 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="kube-rbac-proxy-node" Nov 25 09:47:08 crc kubenswrapper[4854]: E1125 09:47:08.225418 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225425 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.225517 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" containerName="ovnkube-controller" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.228274 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290435 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-systemd-units\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290482 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-log-socket\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290508 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29mb8\" (UniqueName: \"kubernetes.io/projected/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-kube-api-access-29mb8\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290528 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-cni-netd\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290544 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-var-lib-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290565 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-cni-bin\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290595 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-slash\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290618 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-env-overrides\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290635 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-etc-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290654 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovnkube-config\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290685 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-run-netns\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290704 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-node-log\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290722 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290743 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovn-node-metrics-cert\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290758 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-systemd\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290776 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovnkube-script-lib\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290793 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-kubelet\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290814 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-run-ovn-kubernetes\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290833 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290847 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-ovn\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290926 4854 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290961 4854 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290973 4854 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290982 4854 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.290991 4854 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.291000 4854 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.291009 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r24xl\" (UniqueName: \"kubernetes.io/projected/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354-kube-api-access-r24xl\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392251 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29mb8\" (UniqueName: \"kubernetes.io/projected/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-kube-api-access-29mb8\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392710 4854 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-var-lib-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392730 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-cni-netd\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392782 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-var-lib-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392816 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-cni-bin\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-cni-bin\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392852 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-slash\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392924 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-cni-netd\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392978 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-slash\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.392942 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-env-overrides\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393025 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-etc-openvswitch\") pod 
\"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393054 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovnkube-config\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393064 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-etc-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393077 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-run-netns\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393103 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-node-log\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393121 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-run-netns\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393127 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393159 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393188 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-systemd\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393194 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-node-log\") pod \"ovnkube-node-k4sdl\" (UID: 
\"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393209 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovn-node-metrics-cert\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393220 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-systemd\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393231 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovnkube-script-lib\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393254 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-kubelet\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393291 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-run-ovn-kubernetes\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393315 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393341 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-ovn\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393377 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-systemd-units\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393401 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-log-socket\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" Nov 25 
09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393489 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-log-socket\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393527 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-env-overrides\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393563 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-openvswitch\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-kubelet\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393609 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-run-ovn\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393633 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-systemd-units\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.393687 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-host-run-ovn-kubernetes\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.394143 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovnkube-script-lib\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.394562 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovnkube-config\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.397867 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-ovn-node-metrics-cert\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.421010 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29mb8\" (UniqueName: \"kubernetes.io/projected/48ec44af-f9f4-4f03-92b3-a0b8a21778a7-kube-api-access-29mb8\") pod \"ovnkube-node-k4sdl\" (UID: \"48ec44af-f9f4-4f03-92b3-a0b8a21778a7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: I1125 09:47:08.543783 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:08 crc kubenswrapper[4854]: W1125 09:47:08.573548 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48ec44af_f9f4_4f03_92b3_a0b8a21778a7.slice/crio-b70301d0c516619eeadf2148ff50fb91f62a9ae4b0c7c81d7e9e61ddc3fda052 WatchSource:0}: Error finding container b70301d0c516619eeadf2148ff50fb91f62a9ae4b0c7c81d7e9e61ddc3fda052: Status 404 returned error can't find the container with id b70301d0c516619eeadf2148ff50fb91f62a9ae4b0c7c81d7e9e61ddc3fda052
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.072660 4854 generic.go:334] "Generic (PLEG): container finished" podID="48ec44af-f9f4-4f03-92b3-a0b8a21778a7" containerID="5d76956e017e335971a9d9bb685aadff39aa3185f0084b8d3ee60c76478d1380" exitCode=0
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.072715 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerDied","Data":"5d76956e017e335971a9d9bb685aadff39aa3185f0084b8d3ee60c76478d1380"}
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.073139 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"b70301d0c516619eeadf2148ff50fb91f62a9ae4b0c7c81d7e9e61ddc3fda052"}
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.077815 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-acl-logging/0.log"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.078256 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gt7mq_b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/ovn-controller/0.log"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.078619 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq" event={"ID":"b2e0e8c4-35b0-4ca8-acec-d6c94cf76354","Type":"ContainerDied","Data":"bba53019997f81293d66cc26f16c72a727dc367f98f0bc49aa65c04a9d480932"}
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.078664 4854 scope.go:117] "RemoveContainer" containerID="6d9f4f27a0dee1b92577e22bdfe67d21b43a1be64519f231492aeb22d0ba2e0b"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.078743 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gt7mq"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.100887 4854 scope.go:117] "RemoveContainer" containerID="50a479ca6721781365a54ff1fd8aad2b8b97e0f298fe46b62c51cbc2434e980b"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.118694 4854 scope.go:117] "RemoveContainer" containerID="e15435107b0cdf606bf3d4f78ddfec8b6641611a912f3fea51ba1ffc3031df3b"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.146288 4854 scope.go:117] "RemoveContainer" containerID="eb143029f5bbd51e49119aba69fcb5ecb8febacbdfa089ccef1298a4f372278b"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.167857 4854 scope.go:117] "RemoveContainer" containerID="6a694f4eb324ec480c999e6d57a53599b61b5bb42c3f3342e4672bc90b2e095e"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.182256 4854 scope.go:117] "RemoveContainer" containerID="b34a8b788c0ff6e0d0cb1dc375ec9e2ae9a748c911713adc69f9e37f576222ec"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.233528 4854 scope.go:117] "RemoveContainer" containerID="225d630b20a5da5172de2d03044794bbf6ac21d95421a502d21c642c92421e83"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.257224 4854 scope.go:117] "RemoveContainer" containerID="6acdb40732904b212fc33575cf482c9e194722160e66442aee0619cda7f2c755"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.269439 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gt7mq"]
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.275209 4854 scope.go:117] "RemoveContainer" containerID="08b48fb267253e247742f43db5bc3755b161d0e9ea9c7a08b53f7feff0a770d1"
Nov 25 09:47:09 crc kubenswrapper[4854]: I1125 09:47:09.275993 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gt7mq"]
Nov 25 09:47:10 crc kubenswrapper[4854]: I1125 09:47:10.086580 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"3dcaf838274b20f13961be6513b25b57876039afc7b38a43d5e4ca6f3efa7055"}
Nov 25 09:47:10 crc kubenswrapper[4854]: I1125 09:47:10.087653 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"9147db2779de0a478c8aec2e4cb7ec4a750539c6e376da55e695c7586aee8a21"}
Nov 25 09:47:10 crc kubenswrapper[4854]: I1125 09:47:10.087750 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"108525b42ef90cf288e70ea69eb2066f335bb9b99727cbd6cfa71ffbb0958921"}
Nov 25 09:47:11 crc kubenswrapper[4854]: I1125 09:47:11.024077 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2e0e8c4-35b0-4ca8-acec-d6c94cf76354" path="/var/lib/kubelet/pods/b2e0e8c4-35b0-4ca8-acec-d6c94cf76354/volumes"
Nov 25 09:47:11 crc kubenswrapper[4854]: I1125 09:47:11.095375 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"2e9aa8289214d567a5c574ded35040249b1bbc50d1966363f4a9aad72011be18"}
Nov 25 09:47:11 crc kubenswrapper[4854]: I1125 09:47:11.095419 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"981a79d2b5acc3b8537c5961ed5d45134a0a70c251f2858dffd6fd83d8a30b81"}
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.107369 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"f8bc95e30bba0470e97c9d6f47c69f4123d122d4e1f85bd7f47638d8aa39985f"}
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.108755 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"cf751389201076504e941db54edbd9b33e556e90f894afc0db5c09434be3745a"}
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.787613 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"]
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.788438 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.790098 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.790656 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-sqpvs"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.791184 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.851713 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ththj\" (UniqueName: \"kubernetes.io/projected/a79ee517-3034-49c0-98d1-a547d6f27e4c-kube-api-access-ththj\") pod \"obo-prometheus-operator-668cf9dfbb-pt9cp\" (UID: \"a79ee517-3034-49c0-98d1-a547d6f27e4c\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.923372 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"]
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.924197 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.925988 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-p4w4n"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.926234 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.930040 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"]
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.930941 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.953212 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ththj\" (UniqueName: \"kubernetes.io/projected/a79ee517-3034-49c0-98d1-a547d6f27e4c-kube-api-access-ththj\") pod \"obo-prometheus-operator-668cf9dfbb-pt9cp\" (UID: \"a79ee517-3034-49c0-98d1-a547d6f27e4c\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:12 crc kubenswrapper[4854]: I1125 09:47:12.987168 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ththj\" (UniqueName: \"kubernetes.io/projected/a79ee517-3034-49c0-98d1-a547d6f27e4c-kube-api-access-ththj\") pod \"obo-prometheus-operator-668cf9dfbb-pt9cp\" (UID: \"a79ee517-3034-49c0-98d1-a547d6f27e4c\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.055218 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/669f6ca8-f193-423b-91c3-ae56039ee589-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz\" (UID: \"669f6ca8-f193-423b-91c3-ae56039ee589\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.055566 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/669f6ca8-f193-423b-91c3-ae56039ee589-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz\" (UID: \"669f6ca8-f193-423b-91c3-ae56039ee589\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.055707 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7a14e79c-e6de-4e5a-b016-5dad4e2baecb-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd\" (UID: \"7a14e79c-e6de-4e5a-b016-5dad4e2baecb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.055813 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7a14e79c-e6de-4e5a-b016-5dad4e2baecb-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd\" (UID: \"7a14e79c-e6de-4e5a-b016-5dad4e2baecb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.106902 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.113136 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-k2d6w"]
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.116886 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.122121 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.122927 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-lmnzt"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.156991 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/669f6ca8-f193-423b-91c3-ae56039ee589-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz\" (UID: \"669f6ca8-f193-423b-91c3-ae56039ee589\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.157060 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87dph\" (UniqueName: \"kubernetes.io/projected/80bbdac7-392c-4568-9519-45ce6747e77c-kube-api-access-87dph\") pod \"observability-operator-d8bb48f5d-k2d6w\" (UID: \"80bbdac7-392c-4568-9519-45ce6747e77c\") " pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.157112 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/669f6ca8-f193-423b-91c3-ae56039ee589-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz\" (UID: \"669f6ca8-f193-423b-91c3-ae56039ee589\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.157175 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7a14e79c-e6de-4e5a-b016-5dad4e2baecb-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd\" (UID: \"7a14e79c-e6de-4e5a-b016-5dad4e2baecb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.157206 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/80bbdac7-392c-4568-9519-45ce6747e77c-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-k2d6w\" (UID: \"80bbdac7-392c-4568-9519-45ce6747e77c\") " pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.157237 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7a14e79c-e6de-4e5a-b016-5dad4e2baecb-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd\" (UID: \"7a14e79c-e6de-4e5a-b016-5dad4e2baecb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.157904 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(dd792f45987c5e150b144b41ce05db565ca243b77366e7ec3ca20823fd0120b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.157986 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(dd792f45987c5e150b144b41ce05db565ca243b77366e7ec3ca20823fd0120b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.158016 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(dd792f45987c5e150b144b41ce05db565ca243b77366e7ec3ca20823fd0120b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.158066 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators(a79ee517-3034-49c0-98d1-a547d6f27e4c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators(a79ee517-3034-49c0-98d1-a547d6f27e4c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(dd792f45987c5e150b144b41ce05db565ca243b77366e7ec3ca20823fd0120b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp" podUID="a79ee517-3034-49c0-98d1-a547d6f27e4c"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.162322 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7a14e79c-e6de-4e5a-b016-5dad4e2baecb-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd\" (UID: \"7a14e79c-e6de-4e5a-b016-5dad4e2baecb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.165606 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7a14e79c-e6de-4e5a-b016-5dad4e2baecb-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd\" (UID: \"7a14e79c-e6de-4e5a-b016-5dad4e2baecb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.167129 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/669f6ca8-f193-423b-91c3-ae56039ee589-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz\" (UID: \"669f6ca8-f193-423b-91c3-ae56039ee589\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.168396 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/669f6ca8-f193-423b-91c3-ae56039ee589-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz\" (UID: \"669f6ca8-f193-423b-91c3-ae56039ee589\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.228830 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-djf5f"]
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.230180 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.232354 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-lxht5"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.246249 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.254768 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.265021 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/80bbdac7-392c-4568-9519-45ce6747e77c-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-k2d6w\" (UID: \"80bbdac7-392c-4568-9519-45ce6747e77c\") " pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.265146 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87dph\" (UniqueName: \"kubernetes.io/projected/80bbdac7-392c-4568-9519-45ce6747e77c-kube-api-access-87dph\") pod \"observability-operator-d8bb48f5d-k2d6w\" (UID: \"80bbdac7-392c-4568-9519-45ce6747e77c\") " pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.272270 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/80bbdac7-392c-4568-9519-45ce6747e77c-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-k2d6w\" (UID: \"80bbdac7-392c-4568-9519-45ce6747e77c\") " pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.294865 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(533b9378da8253091e00f581259b78792d2d1cf041a87e68c9d87096dddf8679): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.294940 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(533b9378da8253091e00f581259b78792d2d1cf041a87e68c9d87096dddf8679): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.294960 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(533b9378da8253091e00f581259b78792d2d1cf041a87e68c9d87096dddf8679): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.295003 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators(669f6ca8-f193-423b-91c3-ae56039ee589)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators(669f6ca8-f193-423b-91c3-ae56039ee589)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(533b9378da8253091e00f581259b78792d2d1cf041a87e68c9d87096dddf8679): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz" podUID="669f6ca8-f193-423b-91c3-ae56039ee589"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.297054 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(946bcb6d50fc58f667abb47b6db658631ae7014c94b8bdfc001bcc4f0ea14096): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.297137 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(946bcb6d50fc58f667abb47b6db658631ae7014c94b8bdfc001bcc4f0ea14096): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.297206 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(946bcb6d50fc58f667abb47b6db658631ae7014c94b8bdfc001bcc4f0ea14096): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.297266 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators(7a14e79c-e6de-4e5a-b016-5dad4e2baecb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators(7a14e79c-e6de-4e5a-b016-5dad4e2baecb)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(946bcb6d50fc58f667abb47b6db658631ae7014c94b8bdfc001bcc4f0ea14096): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd" podUID="7a14e79c-e6de-4e5a-b016-5dad4e2baecb"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.338662 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87dph\" (UniqueName: \"kubernetes.io/projected/80bbdac7-392c-4568-9519-45ce6747e77c-kube-api-access-87dph\") pod \"observability-operator-d8bb48f5d-k2d6w\" (UID: \"80bbdac7-392c-4568-9519-45ce6747e77c\") " pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.366656 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d54da2d3-0328-4498-a150-7a12963c3d43-openshift-service-ca\") pod \"perses-operator-5446b9c989-djf5f\" (UID: \"d54da2d3-0328-4498-a150-7a12963c3d43\") " pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.366752 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg5xb\" (UniqueName: \"kubernetes.io/projected/d54da2d3-0328-4498-a150-7a12963c3d43-kube-api-access-jg5xb\") pod \"perses-operator-5446b9c989-djf5f\" (UID: \"d54da2d3-0328-4498-a150-7a12963c3d43\") " pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.468033 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d54da2d3-0328-4498-a150-7a12963c3d43-openshift-service-ca\") pod \"perses-operator-5446b9c989-djf5f\" (UID: \"d54da2d3-0328-4498-a150-7a12963c3d43\") " pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.468110 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg5xb\" (UniqueName: \"kubernetes.io/projected/d54da2d3-0328-4498-a150-7a12963c3d43-kube-api-access-jg5xb\") pod \"perses-operator-5446b9c989-djf5f\" (UID: \"d54da2d3-0328-4498-a150-7a12963c3d43\") " pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.469004 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d54da2d3-0328-4498-a150-7a12963c3d43-openshift-service-ca\") pod \"perses-operator-5446b9c989-djf5f\" (UID: \"d54da2d3-0328-4498-a150-7a12963c3d43\") " pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.490483 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg5xb\" (UniqueName: \"kubernetes.io/projected/d54da2d3-0328-4498-a150-7a12963c3d43-kube-api-access-jg5xb\") pod \"perses-operator-5446b9c989-djf5f\" (UID: \"d54da2d3-0328-4498-a150-7a12963c3d43\") " pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.527700 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: I1125 09:47:13.546755 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.552886 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(243ae6349dd1202ad664eacef49c657b22ca39cc73c3e0762bfdfa15d0d69373): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.553005 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(243ae6349dd1202ad664eacef49c657b22ca39cc73c3e0762bfdfa15d0d69373): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.553035 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(243ae6349dd1202ad664eacef49c657b22ca39cc73c3e0762bfdfa15d0d69373): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.553102 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-k2d6w_openshift-operators(80bbdac7-392c-4568-9519-45ce6747e77c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-k2d6w_openshift-operators(80bbdac7-392c-4568-9519-45ce6747e77c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(243ae6349dd1202ad664eacef49c657b22ca39cc73c3e0762bfdfa15d0d69373): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w" podUID="80bbdac7-392c-4568-9519-45ce6747e77c"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.573808 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(c02c93a723c562de2e176140f135c68efb64caf950b225eb1cdc2c422423e6c4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.573878 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(c02c93a723c562de2e176140f135c68efb64caf950b225eb1cdc2c422423e6c4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.573907 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(c02c93a723c562de2e176140f135c68efb64caf950b225eb1cdc2c422423e6c4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:13 crc kubenswrapper[4854]: E1125 09:47:13.573953 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-djf5f_openshift-operators(d54da2d3-0328-4498-a150-7a12963c3d43)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-djf5f_openshift-operators(d54da2d3-0328-4498-a150-7a12963c3d43)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(c02c93a723c562de2e176140f135c68efb64caf950b225eb1cdc2c422423e6c4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-djf5f" podUID="d54da2d3-0328-4498-a150-7a12963c3d43"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.155269 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" event={"ID":"48ec44af-f9f4-4f03-92b3-a0b8a21778a7","Type":"ContainerStarted","Data":"62504d1cc435b99762119f10bae0ef03f8c531bd1d9c7fd01bb204f8bf694a50"}
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.157160 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.157193 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.157252 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.198218 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-djf5f"]
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.198416 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.198806 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.200910 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.210088 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl" podStartSLOduration=7.21006625 podStartE2EDuration="7.21006625s" podCreationTimestamp="2025-11-25 09:47:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:47:15.191951437 +0000 UTC m=+641.044944803" watchObservedRunningTime="2025-11-25 09:47:15.21006625 +0000 UTC m=+641.063059636"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.211067 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.221084 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"]
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.221217 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.221593 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.229865 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"]
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.230029 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.230549 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.244009 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"]
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.244177 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.244764 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.254974 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(40d0a07376c0aaacdc9ed1bb4c7f8bfdb4397c0fef552b31f4e2efb27ab8f250): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.255064 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(40d0a07376c0aaacdc9ed1bb4c7f8bfdb4397c0fef552b31f4e2efb27ab8f250): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.255095 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(40d0a07376c0aaacdc9ed1bb4c7f8bfdb4397c0fef552b31f4e2efb27ab8f250): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.255149 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-djf5f_openshift-operators(d54da2d3-0328-4498-a150-7a12963c3d43)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-djf5f_openshift-operators(d54da2d3-0328-4498-a150-7a12963c3d43)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-djf5f_openshift-operators_d54da2d3-0328-4498-a150-7a12963c3d43_0(40d0a07376c0aaacdc9ed1bb4c7f8bfdb4397c0fef552b31f4e2efb27ab8f250): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-djf5f" podUID="d54da2d3-0328-4498-a150-7a12963c3d43"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.297283 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-k2d6w"]
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.297413 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:15 crc kubenswrapper[4854]: I1125 09:47:15.297874 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.308215 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(6e1582caa8cf6ba5ee2d42ca78a2df08d20bafb6d39ff3f25da242593f7f3a39): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.308279 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(6e1582caa8cf6ba5ee2d42ca78a2df08d20bafb6d39ff3f25da242593f7f3a39): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.308303 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(6e1582caa8cf6ba5ee2d42ca78a2df08d20bafb6d39ff3f25da242593f7f3a39): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.308348 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators(a79ee517-3034-49c0-98d1-a547d6f27e4c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators(a79ee517-3034-49c0-98d1-a547d6f27e4c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-pt9cp_openshift-operators_a79ee517-3034-49c0-98d1-a547d6f27e4c_0(6e1582caa8cf6ba5ee2d42ca78a2df08d20bafb6d39ff3f25da242593f7f3a39): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp" podUID="a79ee517-3034-49c0-98d1-a547d6f27e4c"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.327249 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(85a5141c4ced7b5beda2ac208ec981db85df157dfabf5ac7411a31dcb67416eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.327316 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(85a5141c4ced7b5beda2ac208ec981db85df157dfabf5ac7411a31dcb67416eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.327345 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(85a5141c4ced7b5beda2ac208ec981db85df157dfabf5ac7411a31dcb67416eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.327404 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators(7a14e79c-e6de-4e5a-b016-5dad4e2baecb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators(7a14e79c-e6de-4e5a-b016-5dad4e2baecb)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_openshift-operators_7a14e79c-e6de-4e5a-b016-5dad4e2baecb_0(85a5141c4ced7b5beda2ac208ec981db85df157dfabf5ac7411a31dcb67416eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd" podUID="7a14e79c-e6de-4e5a-b016-5dad4e2baecb"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.334933 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(b096a64a3bb845524abe6e741e0625006470c8e38c1a656b36b83c7c536d4842): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.335031 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(b096a64a3bb845524abe6e741e0625006470c8e38c1a656b36b83c7c536d4842): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.335062 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(b096a64a3bb845524abe6e741e0625006470c8e38c1a656b36b83c7c536d4842): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.335128 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators(669f6ca8-f193-423b-91c3-ae56039ee589)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators(669f6ca8-f193-423b-91c3-ae56039ee589)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_openshift-operators_669f6ca8-f193-423b-91c3-ae56039ee589_0(b096a64a3bb845524abe6e741e0625006470c8e38c1a656b36b83c7c536d4842): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz" podUID="669f6ca8-f193-423b-91c3-ae56039ee589"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.365714 4854 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(ad7eaff50a2439cd0ac9c5dd3e826110038bc10278d94705d9d9091cfe3ac8eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.365811 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(ad7eaff50a2439cd0ac9c5dd3e826110038bc10278d94705d9d9091cfe3ac8eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.365844 4854 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(ad7eaff50a2439cd0ac9c5dd3e826110038bc10278d94705d9d9091cfe3ac8eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:15 crc kubenswrapper[4854]: E1125 09:47:15.365912 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-k2d6w_openshift-operators(80bbdac7-392c-4568-9519-45ce6747e77c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-k2d6w_openshift-operators(80bbdac7-392c-4568-9519-45ce6747e77c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-k2d6w_openshift-operators_80bbdac7-392c-4568-9519-45ce6747e77c_0(ad7eaff50a2439cd0ac9c5dd3e826110038bc10278d94705d9d9091cfe3ac8eb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w" podUID="80bbdac7-392c-4568-9519-45ce6747e77c"
Nov 25 09:47:21 crc kubenswrapper[4854]: I1125 09:47:21.013170 4854 scope.go:117] "RemoveContainer" containerID="5feac493298404321ee4fceca3870193d9a2cb42b9f7d769848893b756653fb9"
Nov 25 09:47:22 crc kubenswrapper[4854]: I1125 09:47:22.194266 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-txnt5_a0e9f759-2eea-43cd-9e0a-6f149785c431/kube-multus/1.log"
Nov 25 09:47:22 crc kubenswrapper[4854]: I1125 09:47:22.194722 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-txnt5" event={"ID":"a0e9f759-2eea-43cd-9e0a-6f149785c431","Type":"ContainerStarted","Data":"74e96f74e4c6fe06255f318eeb7bf1c1d575d9497db03ddcaf86397308a92d23"}
Nov 25 09:47:26 crc kubenswrapper[4854]: I1125 09:47:26.013083 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:26 crc kubenswrapper[4854]: I1125 09:47:26.013669 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-djf5f"
Nov 25 09:47:26 crc kubenswrapper[4854]: I1125 09:47:26.457751 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-djf5f"]
Nov 25 09:47:26 crc kubenswrapper[4854]: W1125 09:47:26.478086 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd54da2d3_0328_4498_a150_7a12963c3d43.slice/crio-e141a48438ac562b42b493330359c0ca9078397a61a5833e9752c48c5538d65e WatchSource:0}: Error finding container e141a48438ac562b42b493330359c0ca9078397a61a5833e9752c48c5538d65e: Status 404 returned error can't find the container with id e141a48438ac562b42b493330359c0ca9078397a61a5833e9752c48c5538d65e
Nov 25 09:47:27 crc kubenswrapper[4854]: I1125 09:47:27.012734 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:27 crc kubenswrapper[4854]: I1125 09:47:27.013581 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:27 crc kubenswrapper[4854]: I1125 09:47:27.210055 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-k2d6w"]
Nov 25 09:47:27 crc kubenswrapper[4854]: I1125 09:47:27.225219 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-djf5f" event={"ID":"d54da2d3-0328-4498-a150-7a12963c3d43","Type":"ContainerStarted","Data":"e141a48438ac562b42b493330359c0ca9078397a61a5833e9752c48c5538d65e"}
Nov 25 09:47:27 crc kubenswrapper[4854]: W1125 09:47:27.233113 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80bbdac7_392c_4568_9519_45ce6747e77c.slice/crio-bef8bcc3771ba68ef529eb4847f79280e3ca619c1dd351f94d21e5de873bb126 WatchSource:0}: Error finding container bef8bcc3771ba68ef529eb4847f79280e3ca619c1dd351f94d21e5de873bb126: Status 404 returned error can't find the container with id bef8bcc3771ba68ef529eb4847f79280e3ca619c1dd351f94d21e5de873bb126
Nov 25 09:47:28 crc kubenswrapper[4854]: I1125 09:47:28.012657 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:28 crc kubenswrapper[4854]: I1125 09:47:28.013393 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"
Nov 25 09:47:28 crc kubenswrapper[4854]: I1125 09:47:28.014077 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:28 crc kubenswrapper[4854]: I1125 09:47:28.014386 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"
Nov 25 09:47:28 crc kubenswrapper[4854]: I1125 09:47:28.234324 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w" event={"ID":"80bbdac7-392c-4568-9519-45ce6747e77c","Type":"ContainerStarted","Data":"bef8bcc3771ba68ef529eb4847f79280e3ca619c1dd351f94d21e5de873bb126"}
Nov 25 09:47:28 crc kubenswrapper[4854]: I1125 09:47:28.302542 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd"]
Nov 25 09:47:28 crc kubenswrapper[4854]: I1125 09:47:28.389561 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz"]
Nov 25 09:47:29 crc kubenswrapper[4854]: I1125 09:47:29.012807 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:29 crc kubenswrapper[4854]: I1125 09:47:29.013298 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"
Nov 25 09:47:29 crc kubenswrapper[4854]: I1125 09:47:29.243326 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd" event={"ID":"7a14e79c-e6de-4e5a-b016-5dad4e2baecb","Type":"ContainerStarted","Data":"f54a76ad95d00e5270a8703562d2bf610487360df3c80aaf5854919ee2847451"}
Nov 25 09:47:29 crc kubenswrapper[4854]: I1125 09:47:29.246989 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz" event={"ID":"669f6ca8-f193-423b-91c3-ae56039ee589","Type":"ContainerStarted","Data":"5a90f54f00c360162adea682801542b6b320d20aff91a6f8db52a198b96fdbba"}
Nov 25 09:47:29 crc kubenswrapper[4854]: I1125 09:47:29.508264 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp"]
Nov 25 09:47:30 crc kubenswrapper[4854]: I1125 09:47:30.292390 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp" event={"ID":"a79ee517-3034-49c0-98d1-a547d6f27e4c","Type":"ContainerStarted","Data":"a73d98ae70f2b35b6bd82fb2719c84ceb5b9fb96e6cce5f8fc5f3f1308cb5f82"}
Nov 25 09:47:38 crc kubenswrapper[4854]: I1125 09:47:38.567178 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k4sdl"
Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.390925 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp" event={"ID":"a79ee517-3034-49c0-98d1-a547d6f27e4c","Type":"ContainerStarted","Data":"ccbb8dfccd55ba6c1b7270248e45c24aa87e93291d7a8ef204461a3f137984ea"}
Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.392534 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w" event={"ID":"80bbdac7-392c-4568-9519-45ce6747e77c","Type":"ContainerStarted","Data":"6b5dd2878092cfb81fc57b759f3030bfaa90b1a3ee98ad59ac773b415f225834"}
Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.393933 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w"
Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.394970 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w" Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.396170 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd" event={"ID":"7a14e79c-e6de-4e5a-b016-5dad4e2baecb","Type":"ContainerStarted","Data":"8f805370632410fa8ee7a9ee279057e627d54ac36e4d58beb792912095e26d7b"} Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.397627 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz" event={"ID":"669f6ca8-f193-423b-91c3-ae56039ee589","Type":"ContainerStarted","Data":"0091a30994e297c03ab65cc80c6bc4108944c91d04deb2a76a8291b8ba1abacc"} Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.399625 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-djf5f" event={"ID":"d54da2d3-0328-4498-a150-7a12963c3d43","Type":"ContainerStarted","Data":"ce127d87346ca804111d31a0f5330b6799184c06cb2df243d5da9800ed342778"} Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.399840 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-djf5f" Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.407338 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-pt9cp" podStartSLOduration=18.66526805 podStartE2EDuration="31.407322608s" podCreationTimestamp="2025-11-25 09:47:12 +0000 UTC" firstStartedPulling="2025-11-25 09:47:29.522940055 +0000 UTC m=+655.375933421" lastFinishedPulling="2025-11-25 09:47:42.264994583 +0000 UTC m=+668.117987979" observedRunningTime="2025-11-25 09:47:43.405798416 +0000 UTC m=+669.258791802" watchObservedRunningTime="2025-11-25 09:47:43.407322608 +0000 UTC m=+669.260315984" Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.425420 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-vklqz" podStartSLOduration=17.63448444 podStartE2EDuration="31.425387111s" podCreationTimestamp="2025-11-25 09:47:12 +0000 UTC" firstStartedPulling="2025-11-25 09:47:28.473548098 +0000 UTC m=+654.326541474" lastFinishedPulling="2025-11-25 09:47:42.264450769 +0000 UTC m=+668.117444145" observedRunningTime="2025-11-25 09:47:43.420406147 +0000 UTC m=+669.273399523" watchObservedRunningTime="2025-11-25 09:47:43.425387111 +0000 UTC m=+669.278380487" Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.487771 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-djf5f" podStartSLOduration=14.735621933000001 podStartE2EDuration="30.487752959s" podCreationTimestamp="2025-11-25 09:47:13 +0000 UTC" firstStartedPulling="2025-11-25 09:47:26.479771812 +0000 UTC m=+652.332765188" lastFinishedPulling="2025-11-25 09:47:42.231902838 +0000 UTC m=+668.084896214" observedRunningTime="2025-11-25 09:47:43.487638816 +0000 UTC m=+669.340632192" watchObservedRunningTime="2025-11-25 09:47:43.487752959 +0000 UTC m=+669.340746355" Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.488332 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-operators/observability-operator-d8bb48f5d-k2d6w" podStartSLOduration=15.423093918 podStartE2EDuration="30.488327845s" podCreationTimestamp="2025-11-25 09:47:13 +0000 UTC" firstStartedPulling="2025-11-25 09:47:27.235882863 +0000 UTC m=+653.088876239" lastFinishedPulling="2025-11-25 09:47:42.30111679 +0000 UTC m=+668.154110166" observedRunningTime="2025-11-25 09:47:43.465612157 +0000 UTC m=+669.318605543" watchObservedRunningTime="2025-11-25 09:47:43.488327845 +0000 UTC m=+669.341321221" Nov 25 09:47:43 crc kubenswrapper[4854]: I1125 09:47:43.509508 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-779d668dff-4pmqd" podStartSLOduration=17.618012999 podStartE2EDuration="31.509486631s" podCreationTimestamp="2025-11-25 09:47:12 +0000 UTC" firstStartedPulling="2025-11-25 09:47:28.350064994 +0000 UTC m=+654.203058370" lastFinishedPulling="2025-11-25 09:47:42.241538626 +0000 UTC m=+668.094532002" observedRunningTime="2025-11-25 09:47:43.505357631 +0000 UTC m=+669.358351007" watchObservedRunningTime="2025-11-25 09:47:43.509486631 +0000 UTC m=+669.362480007" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.788919 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-tfnst"] Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.790394 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.794330 4854 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-mjlqk" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.794342 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.795073 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.808656 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-tfnst"] Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.817962 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-h46p2"] Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.818771 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.820665 4854 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-h965q" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.835405 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-g64kp"] Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.836065 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-h46p2"] Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.836136 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-g64kp" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.840528 4854 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-v24jr" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.858781 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-g64kp"] Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.901839 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-792f5\" (UniqueName: \"kubernetes.io/projected/44c83449-45d4-4c76-a21b-e91f947cf265-kube-api-access-792f5\") pod \"cert-manager-webhook-5655c58dd6-h46p2\" (UID: \"44c83449-45d4-4c76-a21b-e91f947cf265\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.902040 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spwvb\" (UniqueName: \"kubernetes.io/projected/c1e17925-72e5-4e01-b5dd-b12de1f249eb-kube-api-access-spwvb\") pod \"cert-manager-5b446d88c5-g64kp\" (UID: \"c1e17925-72e5-4e01-b5dd-b12de1f249eb\") " pod="cert-manager/cert-manager-5b446d88c5-g64kp" Nov 25 09:47:48 crc kubenswrapper[4854]: I1125 09:47:48.902102 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxqlk\" (UniqueName: \"kubernetes.io/projected/2666cb9d-61b4-435d-b44f-debac56efa9f-kube-api-access-fxqlk\") pod \"cert-manager-cainjector-7f985d654d-tfnst\" (UID: \"2666cb9d-61b4-435d-b44f-debac56efa9f\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.003084 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-792f5\" (UniqueName: \"kubernetes.io/projected/44c83449-45d4-4c76-a21b-e91f947cf265-kube-api-access-792f5\") pod \"cert-manager-webhook-5655c58dd6-h46p2\" (UID: \"44c83449-45d4-4c76-a21b-e91f947cf265\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.003325 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spwvb\" (UniqueName: \"kubernetes.io/projected/c1e17925-72e5-4e01-b5dd-b12de1f249eb-kube-api-access-spwvb\") pod \"cert-manager-5b446d88c5-g64kp\" (UID: \"c1e17925-72e5-4e01-b5dd-b12de1f249eb\") " pod="cert-manager/cert-manager-5b446d88c5-g64kp" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.003418 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxqlk\" (UniqueName: \"kubernetes.io/projected/2666cb9d-61b4-435d-b44f-debac56efa9f-kube-api-access-fxqlk\") pod \"cert-manager-cainjector-7f985d654d-tfnst\" (UID: \"2666cb9d-61b4-435d-b44f-debac56efa9f\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.023061 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-792f5\" (UniqueName: \"kubernetes.io/projected/44c83449-45d4-4c76-a21b-e91f947cf265-kube-api-access-792f5\") pod \"cert-manager-webhook-5655c58dd6-h46p2\" (UID: \"44c83449-45d4-4c76-a21b-e91f947cf265\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.026228 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fxqlk\" (UniqueName: \"kubernetes.io/projected/2666cb9d-61b4-435d-b44f-debac56efa9f-kube-api-access-fxqlk\") pod \"cert-manager-cainjector-7f985d654d-tfnst\" (UID: \"2666cb9d-61b4-435d-b44f-debac56efa9f\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.027403 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spwvb\" (UniqueName: \"kubernetes.io/projected/c1e17925-72e5-4e01-b5dd-b12de1f249eb-kube-api-access-spwvb\") pod \"cert-manager-5b446d88c5-g64kp\" (UID: \"c1e17925-72e5-4e01-b5dd-b12de1f249eb\") " pod="cert-manager/cert-manager-5b446d88c5-g64kp" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.111009 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.148088 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.159758 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-g64kp" Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.516844 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-tfnst"] Nov 25 09:47:49 crc kubenswrapper[4854]: W1125 09:47:49.521051 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2666cb9d_61b4_435d_b44f_debac56efa9f.slice/crio-d5d1d8f9be3f52bf3a60a2edc2042346910fdce85b0f3e20b8d52a102fd9c012 WatchSource:0}: Error finding container d5d1d8f9be3f52bf3a60a2edc2042346910fdce85b0f3e20b8d52a102fd9c012: Status 404 returned error can't find the container with id d5d1d8f9be3f52bf3a60a2edc2042346910fdce85b0f3e20b8d52a102fd9c012 Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.599525 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-g64kp"] Nov 25 09:47:49 crc kubenswrapper[4854]: I1125 09:47:49.605593 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-h46p2"] Nov 25 09:47:49 crc kubenswrapper[4854]: W1125 09:47:49.608957 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1e17925_72e5_4e01_b5dd_b12de1f249eb.slice/crio-942d030b7c8dff8302b268335257a0750082f8a6db56a21fb02defdb47695671 WatchSource:0}: Error finding container 942d030b7c8dff8302b268335257a0750082f8a6db56a21fb02defdb47695671: Status 404 returned error can't find the container with id 942d030b7c8dff8302b268335257a0750082f8a6db56a21fb02defdb47695671 Nov 25 09:47:49 crc kubenswrapper[4854]: W1125 09:47:49.611235 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44c83449_45d4_4c76_a21b_e91f947cf265.slice/crio-4e55a40036b4eca702f29699c8d3cf0d36b8d1143f4be6501527b4f0fbdfa460 WatchSource:0}: Error finding container 4e55a40036b4eca702f29699c8d3cf0d36b8d1143f4be6501527b4f0fbdfa460: Status 404 returned error can't find the container with id 4e55a40036b4eca702f29699c8d3cf0d36b8d1143f4be6501527b4f0fbdfa460 Nov 25 09:47:50 crc kubenswrapper[4854]: I1125 09:47:50.478541 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-5b446d88c5-g64kp" event={"ID":"c1e17925-72e5-4e01-b5dd-b12de1f249eb","Type":"ContainerStarted","Data":"942d030b7c8dff8302b268335257a0750082f8a6db56a21fb02defdb47695671"} Nov 25 09:47:50 crc kubenswrapper[4854]: I1125 09:47:50.480222 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" event={"ID":"2666cb9d-61b4-435d-b44f-debac56efa9f","Type":"ContainerStarted","Data":"d5d1d8f9be3f52bf3a60a2edc2042346910fdce85b0f3e20b8d52a102fd9c012"} Nov 25 09:47:50 crc kubenswrapper[4854]: I1125 09:47:50.481078 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" event={"ID":"44c83449-45d4-4c76-a21b-e91f947cf265","Type":"ContainerStarted","Data":"4e55a40036b4eca702f29699c8d3cf0d36b8d1143f4be6501527b4f0fbdfa460"} Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.506865 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-g64kp" event={"ID":"c1e17925-72e5-4e01-b5dd-b12de1f249eb","Type":"ContainerStarted","Data":"0da1b952c1459fda2ff0069f00d3b7b14f7d936f65e4eaf9fe19d28478926c77"} Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.508253 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" event={"ID":"2666cb9d-61b4-435d-b44f-debac56efa9f","Type":"ContainerStarted","Data":"f746d8bcd7506533c04ec75366c10be9f8b38090c17775a25ec3f1729f43e0bd"} Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.509498 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" event={"ID":"44c83449-45d4-4c76-a21b-e91f947cf265","Type":"ContainerStarted","Data":"d662357e9f973054b57cbcf8f7461c20a647ecad3768f83e4db4ec41117be009"} Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.509963 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.525651 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-g64kp" podStartSLOduration=2.170800094 podStartE2EDuration="5.525631656s" podCreationTimestamp="2025-11-25 09:47:48 +0000 UTC" firstStartedPulling="2025-11-25 09:47:49.611983782 +0000 UTC m=+675.464977158" lastFinishedPulling="2025-11-25 09:47:52.966815344 +0000 UTC m=+678.819808720" observedRunningTime="2025-11-25 09:47:53.522313006 +0000 UTC m=+679.375306392" watchObservedRunningTime="2025-11-25 09:47:53.525631656 +0000 UTC m=+679.378625032" Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.540944 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" podStartSLOduration=2.19270538 podStartE2EDuration="5.540924485s" podCreationTimestamp="2025-11-25 09:47:48 +0000 UTC" firstStartedPulling="2025-11-25 09:47:49.613237215 +0000 UTC m=+675.466230591" lastFinishedPulling="2025-11-25 09:47:52.96145632 +0000 UTC m=+678.814449696" observedRunningTime="2025-11-25 09:47:53.537862693 +0000 UTC m=+679.390856079" watchObservedRunningTime="2025-11-25 09:47:53.540924485 +0000 UTC m=+679.393917861" Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.548684 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-djf5f" Nov 25 09:47:53 crc kubenswrapper[4854]: I1125 09:47:53.564379 4854 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-tfnst" podStartSLOduration=2.134519662 podStartE2EDuration="5.564364891s" podCreationTimestamp="2025-11-25 09:47:48 +0000 UTC" firstStartedPulling="2025-11-25 09:47:49.523475373 +0000 UTC m=+675.376468739" lastFinishedPulling="2025-11-25 09:47:52.953320592 +0000 UTC m=+678.806313968" observedRunningTime="2025-11-25 09:47:53.561687531 +0000 UTC m=+679.414680927" watchObservedRunningTime="2025-11-25 09:47:53.564364891 +0000 UTC m=+679.417358267" Nov 25 09:47:59 crc kubenswrapper[4854]: I1125 09:47:59.150808 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-h46p2" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.565230 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh"] Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.568004 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.571372 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.582078 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh"] Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.640659 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.640786 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.640821 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2vv5\" (UniqueName: \"kubernetes.io/projected/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-kube-api-access-r2vv5\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.741778 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 
09:48:22.741880 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.741912 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2vv5\" (UniqueName: \"kubernetes.io/projected/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-kube-api-access-r2vv5\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.742442 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.742752 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.761328 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2vv5\" (UniqueName: \"kubernetes.io/projected/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-kube-api-access-r2vv5\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.890885 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.957957 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb"] Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.959280 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:22 crc kubenswrapper[4854]: I1125 09:48:22.973049 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb"] Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.046489 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.046537 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.046589 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj7dr\" (UniqueName: \"kubernetes.io/projected/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-kube-api-access-tj7dr\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.147959 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj7dr\" (UniqueName: \"kubernetes.io/projected/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-kube-api-access-tj7dr\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.148071 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.148105 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.149177 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " 
pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.149978 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.173535 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj7dr\" (UniqueName: \"kubernetes.io/projected/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-kube-api-access-tj7dr\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.277936 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.377249 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh"] Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.684270 4854 generic.go:334] "Generic (PLEG): container finished" podID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerID="edde2f224207dae197ce3e92dfeeb117cd9557133a44237d6012632c386042cc" exitCode=0 Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.684318 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" event={"ID":"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d","Type":"ContainerDied","Data":"edde2f224207dae197ce3e92dfeeb117cd9557133a44237d6012632c386042cc"} Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.684350 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" event={"ID":"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d","Type":"ContainerStarted","Data":"677c32f0443ed541eb224a62a176ba8314b16f35c3d3f0c33135a10db95e2889"} Nov 25 09:48:23 crc kubenswrapper[4854]: I1125 09:48:23.753950 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb"] Nov 25 09:48:23 crc kubenswrapper[4854]: W1125 09:48:23.760421 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92b1ebf7_6ffa_4d23_a719_dbd870f3d184.slice/crio-5622cc79094ef71d8e79cb0d792930fa01f933a2ae89445e2cee195b68df6fde WatchSource:0}: Error finding container 5622cc79094ef71d8e79cb0d792930fa01f933a2ae89445e2cee195b68df6fde: Status 404 returned error can't find the container with id 5622cc79094ef71d8e79cb0d792930fa01f933a2ae89445e2cee195b68df6fde Nov 25 09:48:24 crc kubenswrapper[4854]: I1125 09:48:24.691711 4854 generic.go:334] "Generic (PLEG): container finished" podID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerID="3b04489d8265aeab48c273a44faf4a6509e07c6bb5c89a691980ed1989063f45" exitCode=0 Nov 25 09:48:24 crc kubenswrapper[4854]: I1125 09:48:24.691832 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" event={"ID":"92b1ebf7-6ffa-4d23-a719-dbd870f3d184","Type":"ContainerDied","Data":"3b04489d8265aeab48c273a44faf4a6509e07c6bb5c89a691980ed1989063f45"} Nov 25 09:48:24 crc kubenswrapper[4854]: I1125 09:48:24.692066 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" event={"ID":"92b1ebf7-6ffa-4d23-a719-dbd870f3d184","Type":"ContainerStarted","Data":"5622cc79094ef71d8e79cb0d792930fa01f933a2ae89445e2cee195b68df6fde"} Nov 25 09:48:25 crc kubenswrapper[4854]: I1125 09:48:25.665162 4854 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 09:48:25 crc kubenswrapper[4854]: I1125 09:48:25.701444 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" event={"ID":"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d","Type":"ContainerStarted","Data":"05dc0898de3747a36ccfbeac15341b310b81ecba43d898e6a54f2e5ffabb4ef5"} Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.314988 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q6pxm"] Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.316966 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.331970 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q6pxm"] Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.499917 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-utilities\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.500260 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-catalog-content\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.500395 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6kcq\" (UniqueName: \"kubernetes.io/projected/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-kube-api-access-c6kcq\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.602398 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-utilities\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.602856 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-catalog-content\") 
pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.602963 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6kcq\" (UniqueName: \"kubernetes.io/projected/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-kube-api-access-c6kcq\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.603283 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-utilities\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.603726 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-catalog-content\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.628101 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6kcq\" (UniqueName: \"kubernetes.io/projected/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-kube-api-access-c6kcq\") pod \"redhat-operators-q6pxm\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.675201 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.711558 4854 generic.go:334] "Generic (PLEG): container finished" podID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerID="75b6b72d01e716e3687efd7ca6ccb1bf8e6d9e677f10e13f146de7ccf2c9d7e7" exitCode=0 Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.712020 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" event={"ID":"92b1ebf7-6ffa-4d23-a719-dbd870f3d184","Type":"ContainerDied","Data":"75b6b72d01e716e3687efd7ca6ccb1bf8e6d9e677f10e13f146de7ccf2c9d7e7"} Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.734622 4854 generic.go:334] "Generic (PLEG): container finished" podID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerID="05dc0898de3747a36ccfbeac15341b310b81ecba43d898e6a54f2e5ffabb4ef5" exitCode=0 Nov 25 09:48:26 crc kubenswrapper[4854]: I1125 09:48:26.734779 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" event={"ID":"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d","Type":"ContainerDied","Data":"05dc0898de3747a36ccfbeac15341b310b81ecba43d898e6a54f2e5ffabb4ef5"} Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 09:48:27.122199 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q6pxm"] Nov 25 09:48:27 crc kubenswrapper[4854]: W1125 09:48:27.174288 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbec08ab8_4a76_4d8b_af1e_2cca8d191b04.slice/crio-62f9f2630da46d6b4d455d60b54924d87b36217af442a765b91ed5dbfe8570e2 WatchSource:0}: Error finding container 62f9f2630da46d6b4d455d60b54924d87b36217af442a765b91ed5dbfe8570e2: Status 404 returned error can't find the container with id 62f9f2630da46d6b4d455d60b54924d87b36217af442a765b91ed5dbfe8570e2 Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 09:48:27.743317 4854 generic.go:334] "Generic (PLEG): container finished" podID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerID="983d1663676e3b9a85447365a38b51ffa9c0208cf251b1c6f9c856c63cf4e7cc" exitCode=0 Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 09:48:27.743401 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" event={"ID":"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d","Type":"ContainerDied","Data":"983d1663676e3b9a85447365a38b51ffa9c0208cf251b1c6f9c856c63cf4e7cc"} Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 09:48:27.746331 4854 generic.go:334] "Generic (PLEG): container finished" podID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerID="e2dce8607ff61dc12d995717f00d4cc5c07672449c85a5767822d8c079058d4a" exitCode=0 Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 09:48:27.746411 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" event={"ID":"92b1ebf7-6ffa-4d23-a719-dbd870f3d184","Type":"ContainerDied","Data":"e2dce8607ff61dc12d995717f00d4cc5c07672449c85a5767822d8c079058d4a"} Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 09:48:27.749069 4854 generic.go:334] "Generic (PLEG): container finished" podID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerID="4670840f349512c3d9bc861f27c1e5aa5e6e7ff8c0a805334ea4975808b2e386" exitCode=0 Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 
09:48:27.749127 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q6pxm" event={"ID":"bec08ab8-4a76-4d8b-af1e-2cca8d191b04","Type":"ContainerDied","Data":"4670840f349512c3d9bc861f27c1e5aa5e6e7ff8c0a805334ea4975808b2e386"} Nov 25 09:48:27 crc kubenswrapper[4854]: I1125 09:48:27.749156 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q6pxm" event={"ID":"bec08ab8-4a76-4d8b-af1e-2cca8d191b04","Type":"ContainerStarted","Data":"62f9f2630da46d6b4d455d60b54924d87b36217af442a765b91ed5dbfe8570e2"} Nov 25 09:48:28 crc kubenswrapper[4854]: I1125 09:48:28.758944 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q6pxm" event={"ID":"bec08ab8-4a76-4d8b-af1e-2cca8d191b04","Type":"ContainerStarted","Data":"c86c6a67d79086d1eb4426666fa640b5825abf04732281bd7623cb38c4d1f3da"} Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.063446 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.070355 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.250200 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-bundle\") pod \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.250560 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj7dr\" (UniqueName: \"kubernetes.io/projected/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-kube-api-access-tj7dr\") pod \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.250633 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2vv5\" (UniqueName: \"kubernetes.io/projected/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-kube-api-access-r2vv5\") pod \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.250727 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-util\") pod \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\" (UID: \"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d\") " Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.250752 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-bundle\") pod \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.250805 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-util\") pod \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\" (UID: \"92b1ebf7-6ffa-4d23-a719-dbd870f3d184\") " Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.252957 4854 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-bundle" (OuterVolumeSpecName: "bundle") pod "92b1ebf7-6ffa-4d23-a719-dbd870f3d184" (UID: "92b1ebf7-6ffa-4d23-a719-dbd870f3d184"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.252973 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-bundle" (OuterVolumeSpecName: "bundle") pod "61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" (UID: "61f5661c-89ec-4adb-b6d6-ca38af3f0b5d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.257301 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-kube-api-access-tj7dr" (OuterVolumeSpecName: "kube-api-access-tj7dr") pod "92b1ebf7-6ffa-4d23-a719-dbd870f3d184" (UID: "92b1ebf7-6ffa-4d23-a719-dbd870f3d184"). InnerVolumeSpecName "kube-api-access-tj7dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.262064 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-kube-api-access-r2vv5" (OuterVolumeSpecName: "kube-api-access-r2vv5") pod "61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" (UID: "61f5661c-89ec-4adb-b6d6-ca38af3f0b5d"). InnerVolumeSpecName "kube-api-access-r2vv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.262879 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-util" (OuterVolumeSpecName: "util") pod "61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" (UID: "61f5661c-89ec-4adb-b6d6-ca38af3f0b5d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.352154 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2vv5\" (UniqueName: \"kubernetes.io/projected/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-kube-api-access-r2vv5\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.352387 4854 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.352493 4854 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.352563 4854 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/61f5661c-89ec-4adb-b6d6-ca38af3f0b5d-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.352642 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj7dr\" (UniqueName: \"kubernetes.io/projected/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-kube-api-access-tj7dr\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.643377 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-util" (OuterVolumeSpecName: "util") pod "92b1ebf7-6ffa-4d23-a719-dbd870f3d184" (UID: "92b1ebf7-6ffa-4d23-a719-dbd870f3d184"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.657984 4854 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/92b1ebf7-6ffa-4d23-a719-dbd870f3d184-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.765431 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.765427 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh" event={"ID":"61f5661c-89ec-4adb-b6d6-ca38af3f0b5d","Type":"ContainerDied","Data":"677c32f0443ed541eb224a62a176ba8314b16f35c3d3f0c33135a10db95e2889"} Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.765560 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="677c32f0443ed541eb224a62a176ba8314b16f35c3d3f0c33135a10db95e2889" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.768012 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" event={"ID":"92b1ebf7-6ffa-4d23-a719-dbd870f3d184","Type":"ContainerDied","Data":"5622cc79094ef71d8e79cb0d792930fa01f933a2ae89445e2cee195b68df6fde"} Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.768053 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.768056 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5622cc79094ef71d8e79cb0d792930fa01f933a2ae89445e2cee195b68df6fde" Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.769665 4854 generic.go:334] "Generic (PLEG): container finished" podID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerID="c86c6a67d79086d1eb4426666fa640b5825abf04732281bd7623cb38c4d1f3da" exitCode=0 Nov 25 09:48:29 crc kubenswrapper[4854]: I1125 09:48:29.769762 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q6pxm" event={"ID":"bec08ab8-4a76-4d8b-af1e-2cca8d191b04","Type":"ContainerDied","Data":"c86c6a67d79086d1eb4426666fa640b5825abf04732281bd7623cb38c4d1f3da"} Nov 25 09:48:30 crc kubenswrapper[4854]: I1125 09:48:30.778512 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q6pxm" event={"ID":"bec08ab8-4a76-4d8b-af1e-2cca8d191b04","Type":"ContainerStarted","Data":"6f28fb0c4500bcb703d63aae223a0761c141e6c7c191d7554fee9a827d5553a6"} Nov 25 09:48:30 crc kubenswrapper[4854]: I1125 09:48:30.799315 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q6pxm" podStartSLOduration=2.398679188 podStartE2EDuration="4.799299366s" podCreationTimestamp="2025-11-25 09:48:26 +0000 UTC" firstStartedPulling="2025-11-25 09:48:27.751099415 +0000 UTC m=+713.604092791" lastFinishedPulling="2025-11-25 09:48:30.151719593 +0000 UTC m=+716.004712969" observedRunningTime="2025-11-25 09:48:30.794550606 +0000 UTC m=+716.647543992" watchObservedRunningTime="2025-11-25 09:48:30.799299366 +0000 UTC m=+716.652292742" Nov 25 09:48:36 crc kubenswrapper[4854]: I1125 09:48:36.675719 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:36 crc kubenswrapper[4854]: I1125 09:48:36.676266 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:36 crc kubenswrapper[4854]: I1125 09:48:36.715745 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:36 crc kubenswrapper[4854]: I1125 09:48:36.857610 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716393 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2"] Nov 25 09:48:37 crc kubenswrapper[4854]: E1125 09:48:37.716655 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerName="extract" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716685 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerName="extract" Nov 25 09:48:37 crc kubenswrapper[4854]: E1125 09:48:37.716703 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerName="util" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716709 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" 
containerName="util" Nov 25 09:48:37 crc kubenswrapper[4854]: E1125 09:48:37.716719 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerName="pull" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716725 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerName="pull" Nov 25 09:48:37 crc kubenswrapper[4854]: E1125 09:48:37.716736 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerName="extract" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716741 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerName="extract" Nov 25 09:48:37 crc kubenswrapper[4854]: E1125 09:48:37.716755 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerName="pull" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716761 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerName="pull" Nov 25 09:48:37 crc kubenswrapper[4854]: E1125 09:48:37.716770 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerName="util" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716776 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerName="util" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716884 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="92b1ebf7-6ffa-4d23-a719-dbd870f3d184" containerName="extract" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.716906 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="61f5661c-89ec-4adb-b6d6-ca38af3f0b5d" containerName="extract" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.717535 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.721416 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.721513 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-cvhdf" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.721653 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.723536 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.723691 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.723727 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.740442 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2"] Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.876829 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-apiservice-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.876887 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/f41b420b-a51d-40b7-9a74-5930db508da8-manager-config\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.876957 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2g5r\" (UniqueName: \"kubernetes.io/projected/f41b420b-a51d-40b7-9a74-5930db508da8-kube-api-access-m2g5r\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.876983 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-webhook-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.877020 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.978665 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-apiservice-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.978764 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/f41b420b-a51d-40b7-9a74-5930db508da8-manager-config\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.978837 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2g5r\" (UniqueName: \"kubernetes.io/projected/f41b420b-a51d-40b7-9a74-5930db508da8-kube-api-access-m2g5r\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.978873 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-webhook-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.978913 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.979934 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/f41b420b-a51d-40b7-9a74-5930db508da8-manager-config\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.984281 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.984299 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-webhook-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.984487 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f41b420b-a51d-40b7-9a74-5930db508da8-apiservice-cert\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:37 crc kubenswrapper[4854]: I1125 09:48:37.999459 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2g5r\" (UniqueName: \"kubernetes.io/projected/f41b420b-a51d-40b7-9a74-5930db508da8-kube-api-access-m2g5r\") pod \"loki-operator-controller-manager-858c6c7dc8-bdkz2\" (UID: \"f41b420b-a51d-40b7-9a74-5930db508da8\") " pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:38 crc kubenswrapper[4854]: I1125 09:48:38.031886 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:38 crc kubenswrapper[4854]: I1125 09:48:38.468493 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2"] Nov 25 09:48:38 crc kubenswrapper[4854]: I1125 09:48:38.829327 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" event={"ID":"f41b420b-a51d-40b7-9a74-5930db508da8","Type":"ContainerStarted","Data":"cea1801d898163d65657b55a1e72bea5fe8792d68d020f1274f54822782acde1"} Nov 25 09:48:39 crc kubenswrapper[4854]: I1125 09:48:39.706162 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q6pxm"] Nov 25 09:48:39 crc kubenswrapper[4854]: I1125 09:48:39.706928 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q6pxm" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="registry-server" containerID="cri-o://6f28fb0c4500bcb703d63aae223a0761c141e6c7c191d7554fee9a827d5553a6" gracePeriod=2 Nov 25 09:48:39 crc kubenswrapper[4854]: I1125 09:48:39.852186 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q6pxm" event={"ID":"bec08ab8-4a76-4d8b-af1e-2cca8d191b04","Type":"ContainerDied","Data":"6f28fb0c4500bcb703d63aae223a0761c141e6c7c191d7554fee9a827d5553a6"} Nov 25 09:48:39 crc kubenswrapper[4854]: I1125 09:48:39.852336 4854 generic.go:334] "Generic (PLEG): container finished" podID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerID="6f28fb0c4500bcb703d63aae223a0761c141e6c7c191d7554fee9a827d5553a6" exitCode=0 Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.096072 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.211061 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-catalog-content\") pod \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.211385 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6kcq\" (UniqueName: \"kubernetes.io/projected/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-kube-api-access-c6kcq\") pod \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.211451 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-utilities\") pod \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\" (UID: \"bec08ab8-4a76-4d8b-af1e-2cca8d191b04\") " Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.212297 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-utilities" (OuterVolumeSpecName: "utilities") pod "bec08ab8-4a76-4d8b-af1e-2cca8d191b04" (UID: "bec08ab8-4a76-4d8b-af1e-2cca8d191b04"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.226894 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-kube-api-access-c6kcq" (OuterVolumeSpecName: "kube-api-access-c6kcq") pod "bec08ab8-4a76-4d8b-af1e-2cca8d191b04" (UID: "bec08ab8-4a76-4d8b-af1e-2cca8d191b04"). InnerVolumeSpecName "kube-api-access-c6kcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.313408 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.313446 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6kcq\" (UniqueName: \"kubernetes.io/projected/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-kube-api-access-c6kcq\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.363157 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bec08ab8-4a76-4d8b-af1e-2cca8d191b04" (UID: "bec08ab8-4a76-4d8b-af1e-2cca8d191b04"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.414585 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bec08ab8-4a76-4d8b-af1e-2cca8d191b04-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.868500 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q6pxm" event={"ID":"bec08ab8-4a76-4d8b-af1e-2cca8d191b04","Type":"ContainerDied","Data":"62f9f2630da46d6b4d455d60b54924d87b36217af442a765b91ed5dbfe8570e2"} Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.868555 4854 scope.go:117] "RemoveContainer" containerID="6f28fb0c4500bcb703d63aae223a0761c141e6c7c191d7554fee9a827d5553a6" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.868555 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q6pxm" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.890443 4854 scope.go:117] "RemoveContainer" containerID="c86c6a67d79086d1eb4426666fa640b5825abf04732281bd7623cb38c4d1f3da" Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.896867 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q6pxm"] Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.903266 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q6pxm"] Nov 25 09:48:40 crc kubenswrapper[4854]: I1125 09:48:40.917932 4854 scope.go:117] "RemoveContainer" containerID="4670840f349512c3d9bc861f27c1e5aa5e6e7ff8c0a805334ea4975808b2e386" Nov 25 09:48:41 crc kubenswrapper[4854]: I1125 09:48:41.023099 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" path="/var/lib/kubelet/pods/bec08ab8-4a76-4d8b-af1e-2cca8d191b04/volumes" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.683463 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-p8rwt"] Nov 25 09:48:42 crc kubenswrapper[4854]: E1125 09:48:42.684064 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="extract-content" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.684082 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="extract-content" Nov 25 09:48:42 crc kubenswrapper[4854]: E1125 09:48:42.684096 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="extract-utilities" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.684105 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="extract-utilities" Nov 25 09:48:42 crc kubenswrapper[4854]: E1125 09:48:42.684127 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="registry-server" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.684134 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="registry-server" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.684267 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="bec08ab8-4a76-4d8b-af1e-2cca8d191b04" containerName="registry-server" Nov 25 
09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.684822 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.693230 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.693233 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.693319 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-8t474" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.720143 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-p8rwt"] Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.846098 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzlsg\" (UniqueName: \"kubernetes.io/projected/729e9fb0-9e7d-468b-8a41-df4c73b51607-kube-api-access-gzlsg\") pod \"cluster-logging-operator-ff9846bd-p8rwt\" (UID: \"729e9fb0-9e7d-468b-8a41-df4c73b51607\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.947287 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzlsg\" (UniqueName: \"kubernetes.io/projected/729e9fb0-9e7d-468b-8a41-df4c73b51607-kube-api-access-gzlsg\") pod \"cluster-logging-operator-ff9846bd-p8rwt\" (UID: \"729e9fb0-9e7d-468b-8a41-df4c73b51607\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.987536 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzlsg\" (UniqueName: \"kubernetes.io/projected/729e9fb0-9e7d-468b-8a41-df4c73b51607-kube-api-access-gzlsg\") pod \"cluster-logging-operator-ff9846bd-p8rwt\" (UID: \"729e9fb0-9e7d-468b-8a41-df4c73b51607\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" Nov 25 09:48:42 crc kubenswrapper[4854]: I1125 09:48:42.999370 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" Nov 25 09:48:44 crc kubenswrapper[4854]: I1125 09:48:44.154705 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-p8rwt"] Nov 25 09:48:44 crc kubenswrapper[4854]: I1125 09:48:44.906212 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" event={"ID":"729e9fb0-9e7d-468b-8a41-df4c73b51607","Type":"ContainerStarted","Data":"c442ad44e9b66f0aaa657a63309cc3b69d4570bc6d3745257b2c6447ec6c32bd"} Nov 25 09:48:44 crc kubenswrapper[4854]: I1125 09:48:44.907594 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" event={"ID":"f41b420b-a51d-40b7-9a74-5930db508da8","Type":"ContainerStarted","Data":"13389e321a696379a7cab35d5f4d8dda8c3060c25ae6f6152906bfab0fcb9ad6"} Nov 25 09:48:53 crc kubenswrapper[4854]: I1125 09:48:53.969797 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" event={"ID":"f41b420b-a51d-40b7-9a74-5930db508da8","Type":"ContainerStarted","Data":"f2096b77791f689e961d76848cd59b5c39dfe2106921a462d8ada9006fadb30b"} Nov 25 09:48:53 crc kubenswrapper[4854]: I1125 09:48:53.971461 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:53 crc kubenswrapper[4854]: I1125 09:48:53.971633 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" event={"ID":"729e9fb0-9e7d-468b-8a41-df4c73b51607","Type":"ContainerStarted","Data":"7fa30a8dcf3096c1efc6b6b24121db453f0ae7e99f3f458150a1744fe31a0c31"} Nov 25 09:48:53 crc kubenswrapper[4854]: I1125 09:48:53.972015 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" Nov 25 09:48:53 crc kubenswrapper[4854]: I1125 09:48:53.993300 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-858c6c7dc8-bdkz2" podStartSLOduration=2.634608456 podStartE2EDuration="16.993279997s" podCreationTimestamp="2025-11-25 09:48:37 +0000 UTC" firstStartedPulling="2025-11-25 09:48:38.476344387 +0000 UTC m=+724.329337763" lastFinishedPulling="2025-11-25 09:48:52.835015928 +0000 UTC m=+738.688009304" observedRunningTime="2025-11-25 09:48:53.987611502 +0000 UTC m=+739.840604898" watchObservedRunningTime="2025-11-25 09:48:53.993279997 +0000 UTC m=+739.846273393" Nov 25 09:48:54 crc kubenswrapper[4854]: I1125 09:48:54.013966 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-p8rwt" podStartSLOduration=3.555657849 podStartE2EDuration="12.013952983s" podCreationTimestamp="2025-11-25 09:48:42 +0000 UTC" firstStartedPulling="2025-11-25 09:48:44.354544927 +0000 UTC m=+730.207538323" lastFinishedPulling="2025-11-25 09:48:52.812840081 +0000 UTC m=+738.665833457" observedRunningTime="2025-11-25 09:48:54.009031428 +0000 UTC m=+739.862024804" watchObservedRunningTime="2025-11-25 09:48:54.013952983 +0000 UTC m=+739.866946359" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.534824 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Nov 25 09:48:59 crc 
kubenswrapper[4854]: I1125 09:48:59.536451 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.538111 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.538709 4854 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-fww9q" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.543149 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.549087 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.708461 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\") pod \"minio\" (UID: \"59fdcacf-902a-46ec-82a1-94c62dd42315\") " pod="minio-dev/minio" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.708558 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpnv8\" (UniqueName: \"kubernetes.io/projected/59fdcacf-902a-46ec-82a1-94c62dd42315-kube-api-access-xpnv8\") pod \"minio\" (UID: \"59fdcacf-902a-46ec-82a1-94c62dd42315\") " pod="minio-dev/minio" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.809728 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpnv8\" (UniqueName: \"kubernetes.io/projected/59fdcacf-902a-46ec-82a1-94c62dd42315-kube-api-access-xpnv8\") pod \"minio\" (UID: \"59fdcacf-902a-46ec-82a1-94c62dd42315\") " pod="minio-dev/minio" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.809855 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\") pod \"minio\" (UID: \"59fdcacf-902a-46ec-82a1-94c62dd42315\") " pod="minio-dev/minio" Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.812496 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.812527 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\") pod \"minio\" (UID: \"59fdcacf-902a-46ec-82a1-94c62dd42315\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9868250c9c8ce82fc5b105657a06f71b4b7049480c946aadafc653c3ef03d748/globalmount\"" pod="minio-dev/minio"
Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.834236 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77f402a5-2ba5-4be0-9a79-629c6cfada95\") pod \"minio\" (UID: \"59fdcacf-902a-46ec-82a1-94c62dd42315\") " pod="minio-dev/minio"
Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.835581 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpnv8\" (UniqueName: \"kubernetes.io/projected/59fdcacf-902a-46ec-82a1-94c62dd42315-kube-api-access-xpnv8\") pod \"minio\" (UID: \"59fdcacf-902a-46ec-82a1-94c62dd42315\") " pod="minio-dev/minio"
Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.870945 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.969788 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4chks"]
Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.971547 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:48:59 crc kubenswrapper[4854]: I1125 09:48:59.975918 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chks"]
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.116952 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgwgg\" (UniqueName: \"kubernetes.io/projected/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-kube-api-access-mgwgg\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.117031 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-catalog-content\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.117065 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-utilities\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.218582 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgwgg\" (UniqueName: \"kubernetes.io/projected/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-kube-api-access-mgwgg\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.218716 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-catalog-content\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.218764 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-utilities\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.219326 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-catalog-content\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.219373 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-utilities\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.251631 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgwgg\" (UniqueName: \"kubernetes.io/projected/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-kube-api-access-mgwgg\") pod \"redhat-marketplace-4chks\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.290354 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chks"
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.348142 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Nov 25 09:49:00 crc kubenswrapper[4854]: W1125 09:49:00.737339 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b2a9ddb_2e8a_4b49_bb83_e312418043d9.slice/crio-83a9c31e8354cecd316173c59830e1e8b30266710ead04e6f04a22595c648066 WatchSource:0}: Error finding container 83a9c31e8354cecd316173c59830e1e8b30266710ead04e6f04a22595c648066: Status 404 returned error can't find the container with id 83a9c31e8354cecd316173c59830e1e8b30266710ead04e6f04a22595c648066
Nov 25 09:49:00 crc kubenswrapper[4854]: I1125 09:49:00.737852 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chks"]
Nov 25 09:49:01 crc kubenswrapper[4854]: I1125 09:49:01.040214 4854 generic.go:334] "Generic (PLEG): container finished" podID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerID="ad3b45e96b90ac8838c0384152ddf852841b1a5b007598b6ffe019aba881c4c7" exitCode=0
Nov 25 09:49:01 crc kubenswrapper[4854]: I1125 09:49:01.045354 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chks" event={"ID":"6b2a9ddb-2e8a-4b49-bb83-e312418043d9","Type":"ContainerDied","Data":"ad3b45e96b90ac8838c0384152ddf852841b1a5b007598b6ffe019aba881c4c7"}
Nov 25 09:49:01 crc kubenswrapper[4854]: I1125 09:49:01.045404 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chks" event={"ID":"6b2a9ddb-2e8a-4b49-bb83-e312418043d9","Type":"ContainerStarted","Data":"83a9c31e8354cecd316173c59830e1e8b30266710ead04e6f04a22595c648066"}
Nov 25 09:49:01 crc kubenswrapper[4854]: I1125 09:49:01.045420 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"59fdcacf-902a-46ec-82a1-94c62dd42315","Type":"ContainerStarted","Data":"9cfd988ae6082a985199730228e24fe6602f0398d3cb675a743b685509773127"}
Nov 25 09:49:04 crc kubenswrapper[4854]: I1125 09:49:04.063947 4854 generic.go:334] "Generic (PLEG): container finished" podID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerID="fd43b2c37a9bbef9173e21b59eacb27d90acf3550838442149182b604c27184c" exitCode=0
Nov 25 09:49:04 crc kubenswrapper[4854]: I1125 09:49:04.064425 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chks" event={"ID":"6b2a9ddb-2e8a-4b49-bb83-e312418043d9","Type":"ContainerDied","Data":"fd43b2c37a9bbef9173e21b59eacb27d90acf3550838442149182b604c27184c"}
Nov 25 09:49:04 crc kubenswrapper[4854]: I1125 09:49:04.066490 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"59fdcacf-902a-46ec-82a1-94c62dd42315","Type":"ContainerStarted","Data":"a251006a88819fc8f17d6db8cd3c59a00c49115ec4fc9050b3682447c305b90f"}
Nov 25 09:49:04 crc kubenswrapper[4854]: I1125 09:49:04.113971 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=5.013198921 podStartE2EDuration="8.113950772s" podCreationTimestamp="2025-11-25 09:48:56 +0000 UTC" firstStartedPulling="2025-11-25 09:49:00.369238147 +0000 UTC m=+746.222231523" lastFinishedPulling="2025-11-25 09:49:03.469989998 +0000 UTC m=+749.322983374" observedRunningTime="2025-11-25 09:49:04.095527198 +0000 UTC m=+749.948520574" watchObservedRunningTime="2025-11-25 09:49:04.113950772 +0000 UTC m=+749.966944148"
Nov 25 09:49:05 crc kubenswrapper[4854]: I1125 09:49:05.078738 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chks" event={"ID":"6b2a9ddb-2e8a-4b49-bb83-e312418043d9","Type":"ContainerStarted","Data":"07a087d2525fa07b3b466775c1277f94400ea5b7fe2fd8784f7be2647677db1f"}
Nov 25 09:49:05 crc kubenswrapper[4854]: I1125 09:49:05.112317 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4chks" podStartSLOduration=2.6473949279999998 podStartE2EDuration="6.112302002s" podCreationTimestamp="2025-11-25 09:48:59 +0000 UTC" firstStartedPulling="2025-11-25 09:49:01.042861273 +0000 UTC m=+746.895854639" lastFinishedPulling="2025-11-25 09:49:04.507768337 +0000 UTC m=+750.360761713" observedRunningTime="2025-11-25 09:49:05.111887371 +0000 UTC m=+750.964880737" watchObservedRunningTime="2025-11-25 09:49:05.112302002 +0000 UTC m=+750.965295368"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.309029 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.310115 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.319088 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-xwdp4"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.320005 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.320173 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.320322 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.320402 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.333770 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.457153 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.457209 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74f7ae80-fd76-4c73-85e8-a886a203733d-config\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.457248 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.457335 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.457357 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc4rk\" (UniqueName: \"kubernetes.io/projected/74f7ae80-fd76-4c73-85e8-a886a203733d-kube-api-access-cc4rk\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.516977 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.517819 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.521876 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.522121 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.522470 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.542347 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.562463 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.562530 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74f7ae80-fd76-4c73-85e8-a886a203733d-config\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.562580 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.562680 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.562715 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc4rk\" (UniqueName: \"kubernetes.io/projected/74f7ae80-fd76-4c73-85e8-a886a203733d-kube-api-access-cc4rk\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.579277 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74f7ae80-fd76-4c73-85e8-a886a203733d-config\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.579530 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.581335 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.596469 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/74f7ae80-fd76-4c73-85e8-a886a203733d-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.598547 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc4rk\" (UniqueName: \"kubernetes.io/projected/74f7ae80-fd76-4c73-85e8-a886a203733d-kube-api-access-cc4rk\") pod \"logging-loki-distributor-76cc67bf56-9jxqv\" (UID: \"74f7ae80-fd76-4c73-85e8-a886a203733d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.617108 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.629002 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.634837 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.635089 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.638985 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.661354 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.664438 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6c5fbf6-e379-49e2-a402-e36d171150dd-config\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.664497 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.664542 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.664597 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmz9v\" (UniqueName: \"kubernetes.io/projected/d6c5fbf6-e379-49e2-a402-e36d171150dd-kube-api-access-qmz9v\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.664625 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.664656 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.736578 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t9zd2"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.743646 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t9zd2"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.755735 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t9zd2"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.765904 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.765965 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766039 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmz9v\" (UniqueName: \"kubernetes.io/projected/d6c5fbf6-e379-49e2-a402-e36d171150dd-kube-api-access-qmz9v\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766073 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766099 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpgc7\" (UniqueName: \"kubernetes.io/projected/111ecac4-100d-47df-97d7-23f5c048c7d5-kube-api-access-kpgc7\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766142 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766188 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766225 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/111ecac4-100d-47df-97d7-23f5c048c7d5-config\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766255 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766298 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6c5fbf6-e379-49e2-a402-e36d171150dd-config\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.766333 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.767210 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.774016 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.777515 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.778464 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6c5fbf6-e379-49e2-a402-e36d171150dd-config\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.790536 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.791972 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/d6c5fbf6-e379-49e2-a402-e36d171150dd-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.793519 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.796647 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.796962 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.796986 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmz9v\" (UniqueName: \"kubernetes.io/projected/d6c5fbf6-e379-49e2-a402-e36d171150dd-kube-api-access-qmz9v\") pod \"logging-loki-querier-5895d59bb8-dzsk5\" (UID: \"d6c5fbf6-e379-49e2-a402-e36d171150dd\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.797038 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.797119 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.797870 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.798014 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.799974 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.803484 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-vfg4p"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.834327 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.849030 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.858262 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"]
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867538 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867601 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-utilities\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867628 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867664 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn4mq\" (UniqueName: \"kubernetes.io/projected/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-kube-api-access-jn4mq\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867726 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpgc7\" (UniqueName: \"kubernetes.io/projected/111ecac4-100d-47df-97d7-23f5c048c7d5-kube-api-access-kpgc7\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867764 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tenants\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867804 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-lokistack-gateway\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867834 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6bdj\" (UniqueName: \"kubernetes.io/projected/44898682-d509-4f9b-b054-cd1df11c603d-kube-api-access-h6bdj\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867859 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-catalog-content\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867884 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867927 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867952 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.867992 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/111ecac4-100d-47df-97d7-23f5c048c7d5-config\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868018 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868046 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868082 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868109 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwxlh\" (UniqueName: \"kubernetes.io/projected/b011dfd2-12cc-456d-9106-ed460f6550a3-kube-api-access-jwxlh\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868132 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868158 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-rbac\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868191 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868216 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-rbac\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868238 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tenants\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868272 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-lokistack-gateway\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"
Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.868300 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume
\"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.873284 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/111ecac4-100d-47df-97d7-23f5c048c7d5-config\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.877799 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.884799 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.896748 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/111ecac4-100d-47df-97d7-23f5c048c7d5-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.905626 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpgc7\" (UniqueName: \"kubernetes.io/projected/111ecac4-100d-47df-97d7-23f5c048c7d5-kube-api-access-kpgc7\") pod \"logging-loki-query-frontend-84558f7c9f-9xgzz\" (UID: \"111ecac4-100d-47df-97d7-23f5c048c7d5\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.953266 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973224 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973280 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973305 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwxlh\" (UniqueName: \"kubernetes.io/projected/b011dfd2-12cc-456d-9106-ed460f6550a3-kube-api-access-jwxlh\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973335 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-rbac\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973351 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973373 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973393 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-rbac\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973411 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tenants\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973433 4854 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-lokistack-gateway\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973464 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973484 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-utilities\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973501 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973523 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn4mq\" (UniqueName: \"kubernetes.io/projected/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-kube-api-access-jn4mq\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973557 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tenants\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973579 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-lokistack-gateway\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973599 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6bdj\" (UniqueName: \"kubernetes.io/projected/44898682-d509-4f9b-b054-cd1df11c603d-kube-api-access-h6bdj\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973616 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-catalog-content\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " 
pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973635 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.973650 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.974557 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: E1125 09:49:08.975832 4854 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Nov 25 09:49:08 crc kubenswrapper[4854]: E1125 09:49:08.975916 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tls-secret podName:3aad27bd-694f-4ee7-a0a3-daf16b8d42e5 nodeName:}" failed. No retries permitted until 2025-11-25 09:49:09.475897574 +0000 UTC m=+755.328890940 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tls-secret") pod "logging-loki-gateway-7bb4c68556-4mv8n" (UID: "3aad27bd-694f-4ee7-a0a3-daf16b8d42e5") : secret "logging-loki-gateway-http" not found Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.976588 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-utilities\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.977099 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-catalog-content\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:08 crc kubenswrapper[4854]: E1125 09:49:08.977173 4854 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Nov 25 09:49:08 crc kubenswrapper[4854]: E1125 09:49:08.977217 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tls-secret podName:44898682-d509-4f9b-b054-cd1df11c603d nodeName:}" failed. No retries permitted until 2025-11-25 09:49:09.477201289 +0000 UTC m=+755.330194755 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tls-secret") pod "logging-loki-gateway-7bb4c68556-bj5ps" (UID: "44898682-d509-4f9b-b054-cd1df11c603d") : secret "logging-loki-gateway-http" not found Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.977311 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-rbac\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.977488 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-lokistack-gateway\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.977821 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.977993 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.978235 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.978417 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.978979 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-lokistack-gateway\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.980342 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-rbac\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " 
pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.981337 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.982378 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tenants\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.984252 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tenants\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:08 crc kubenswrapper[4854]: I1125 09:49:08.999797 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6bdj\" (UniqueName: \"kubernetes.io/projected/44898682-d509-4f9b-b054-cd1df11c603d-kube-api-access-h6bdj\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.007504 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn4mq\" (UniqueName: \"kubernetes.io/projected/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-kube-api-access-jn4mq\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.011325 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwxlh\" (UniqueName: \"kubernetes.io/projected/b011dfd2-12cc-456d-9106-ed460f6550a3-kube-api-access-jwxlh\") pod \"community-operators-t9zd2\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.055746 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv"] Nov 25 09:49:09 crc kubenswrapper[4854]: W1125 09:49:09.087239 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74f7ae80_fd76_4c73_85e8_a886a203733d.slice/crio-14e84a079cdc9547090ec625bebc6cd153843db03c609a3cef21c9ec3f361dea WatchSource:0}: Error finding container 14e84a079cdc9547090ec625bebc6cd153843db03c609a3cef21c9ec3f361dea: Status 404 returned error can't find the container with id 14e84a079cdc9547090ec625bebc6cd153843db03c609a3cef21c9ec3f361dea Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.091817 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.111488 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv" event={"ID":"74f7ae80-fd76-4c73-85e8-a886a203733d","Type":"ContainerStarted","Data":"14e84a079cdc9547090ec625bebc6cd153843db03c609a3cef21c9ec3f361dea"} Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.159367 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-dzsk5"] Nov 25 09:49:09 crc kubenswrapper[4854]: W1125 09:49:09.168643 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6c5fbf6_e379_49e2_a402_e36d171150dd.slice/crio-3e147b69e164b83ca943391b36bcebee24a0dd6ef94b29674f99e2928d43764b WatchSource:0}: Error finding container 3e147b69e164b83ca943391b36bcebee24a0dd6ef94b29674f99e2928d43764b: Status 404 returned error can't find the container with id 3e147b69e164b83ca943391b36bcebee24a0dd6ef94b29674f99e2928d43764b Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.369971 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz"] Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.496515 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.496587 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.504304 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3aad27bd-694f-4ee7-a0a3-daf16b8d42e5-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-4mv8n\" (UID: \"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.515426 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/44898682-d509-4f9b-b054-cd1df11c603d-tls-secret\") pod \"logging-loki-gateway-7bb4c68556-bj5ps\" (UID: \"44898682-d509-4f9b-b054-cd1df11c603d\") " pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.529314 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.530531 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.539126 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.544381 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.569306 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604063 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac43dd4-dc0c-4974-8521-11073254c3cd-config\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604109 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604146 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqrnr\" (UniqueName: \"kubernetes.io/projected/3ac43dd4-dc0c-4974-8521-11073254c3cd-kube-api-access-vqrnr\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604177 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604245 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-73740ca2-78ef-449a-8db8-755b862cd99b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73740ca2-78ef-449a-8db8-755b862cd99b\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604285 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604304 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " 
pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.604331 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.618729 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t9zd2"] Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.634606 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.635789 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.642561 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.642969 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.666217 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 25 09:49:09 crc kubenswrapper[4854]: W1125 09:49:09.666391 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb011dfd2_12cc_456d_9106_ed460f6550a3.slice/crio-be39c26813e5025fb4b129922361ea847a7f27cc8dded7f9cc7d13f961709525 WatchSource:0}: Error finding container be39c26813e5025fb4b129922361ea847a7f27cc8dded7f9cc7d13f961709525: Status 404 returned error can't find the container with id be39c26813e5025fb4b129922361ea847a7f27cc8dded7f9cc7d13f961709525 Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.711953 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.712102 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac43dd4-dc0c-4974-8521-11073254c3cd-config\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.712137 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.712187 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqrnr\" (UniqueName: \"kubernetes.io/projected/3ac43dd4-dc0c-4974-8521-11073254c3cd-kube-api-access-vqrnr\") pod \"logging-loki-ingester-0\" (UID: 
\"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.712242 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.712343 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-73740ca2-78ef-449a-8db8-755b862cd99b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73740ca2-78ef-449a-8db8-755b862cd99b\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.712433 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.712464 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.722940 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac43dd4-dc0c-4974-8521-11073254c3cd-config\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.726152 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.732771 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.733112 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.735493 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.742221 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/3ac43dd4-dc0c-4974-8521-11073254c3cd-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.744386 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.744429 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-73740ca2-78ef-449a-8db8-755b862cd99b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73740ca2-78ef-449a-8db8-755b862cd99b\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/89d6e408bec2235c2976aefa5eec131c091b7003e8ad051e859b6c7d4aa87993/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.744777 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.744823 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e8b5318c5e0f7170095ba839286311382107e5bf2eeab4001954f8d4fd722750/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.755877 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.772023 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqrnr\" (UniqueName: \"kubernetes.io/projected/3ac43dd4-dc0c-4974-8521-11073254c3cd-kube-api-access-vqrnr\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.810984 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.811965 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.814904 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.816105 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.816218 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc7nq\" (UniqueName: \"kubernetes.io/projected/9578161a-d74b-48f8-9e3a-4ed85b4bb673-kube-api-access-qc7nq\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.816264 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.816347 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.816420 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.816468 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.816500 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9578161a-d74b-48f8-9e3a-4ed85b4bb673-config\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.818064 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.821101 4854 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-logging"/"logging-loki-index-gateway-grpc" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.853359 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df0e89d2-47dd-4340-b40b-a56a7c30da01\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.883968 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-73740ca2-78ef-449a-8db8-755b862cd99b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-73740ca2-78ef-449a-8db8-755b862cd99b\") pod \"logging-loki-ingester-0\" (UID: \"3ac43dd4-dc0c-4974-8521-11073254c3cd\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918112 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918214 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918246 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc7nq\" (UniqueName: \"kubernetes.io/projected/9578161a-d74b-48f8-9e3a-4ed85b4bb673-kube-api-access-qc7nq\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918295 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gt29\" (UniqueName: \"kubernetes.io/projected/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-kube-api-access-9gt29\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918324 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-209be852-defb-468f-959c-49d1b31bf045\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-209be852-defb-468f-959c-49d1b31bf045\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918357 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918380 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918408 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9578161a-d74b-48f8-9e3a-4ed85b4bb673-config\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918436 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918472 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918495 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918521 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918542 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-config\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.918569 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.921538 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9578161a-d74b-48f8-9e3a-4ed85b4bb673-config\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " 
pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.923378 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.923410 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8cfa8e29bbfd70222e26a53df947cff08ab6126b0ecda373e157e7f62d657db7/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.928930 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.929854 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.930275 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.941311 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/9578161a-d74b-48f8-9e3a-4ed85b4bb673-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.947918 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc7nq\" (UniqueName: \"kubernetes.io/projected/9578161a-d74b-48f8-9e3a-4ed85b4bb673-kube-api-access-qc7nq\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:09 crc kubenswrapper[4854]: I1125 09:49:09.993959 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c0db409b-36bd-46d5-bb20-f7e9cd3c4d66\") pod \"logging-loki-compactor-0\" (UID: \"9578161a-d74b-48f8-9e3a-4ed85b4bb673\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.019229 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-ca-bundle\") pod 
\"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.019296 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gt29\" (UniqueName: \"kubernetes.io/projected/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-kube-api-access-9gt29\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.019321 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-209be852-defb-468f-959c-49d1b31bf045\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-209be852-defb-468f-959c-49d1b31bf045\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.019353 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.019387 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.019410 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-config\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.019429 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.024581 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.025403 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.026369 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-config\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.027288 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.028368 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.029493 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.029520 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-209be852-defb-468f-959c-49d1b31bf045\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-209be852-defb-468f-959c-49d1b31bf045\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/19ac6db4b9c2acea78f9245b9fb13befb1f86f16d1e17d2f34d921f17ade0eb4/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.034023 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.047410 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gt29\" (UniqueName: \"kubernetes.io/projected/a72bbae8-a7e3-4605-b2b7-f9254913f1e5-kube-api-access-9gt29\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.075979 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-209be852-defb-468f-959c-49d1b31bf045\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-209be852-defb-468f-959c-49d1b31bf045\") pod \"logging-loki-index-gateway-0\" (UID: \"a72bbae8-a7e3-4605-b2b7-f9254913f1e5\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.104094 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n"] Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.123206 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5" event={"ID":"d6c5fbf6-e379-49e2-a402-e36d171150dd","Type":"ContainerStarted","Data":"3e147b69e164b83ca943391b36bcebee24a0dd6ef94b29674f99e2928d43764b"} Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.125461 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" event={"ID":"111ecac4-100d-47df-97d7-23f5c048c7d5","Type":"ContainerStarted","Data":"366e130d8bff9b17224083194a3c4294712fdcfc894625988e9d798cf02b057a"} Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.128923 4854 generic.go:334] "Generic (PLEG): container finished" podID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerID="def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f" exitCode=0 Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.128959 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9zd2" event={"ID":"b011dfd2-12cc-456d-9106-ed460f6550a3","Type":"ContainerDied","Data":"def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f"} Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.128979 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9zd2" event={"ID":"b011dfd2-12cc-456d-9106-ed460f6550a3","Type":"ContainerStarted","Data":"be39c26813e5025fb4b129922361ea847a7f27cc8dded7f9cc7d13f961709525"} Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.168060 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.175381 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.291249 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4chks" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.291289 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4chks" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.371348 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4chks" Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.421037 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps"] Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.497653 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 25 09:49:10 crc kubenswrapper[4854]: W1125 09:49:10.501586 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9578161a_d74b_48f8_9e3a_4ed85b4bb673.slice/crio-a03a4317ed5c643df44428465943b873be68b00b9bd76eade92e397f4dbd4abc WatchSource:0}: Error finding container a03a4317ed5c643df44428465943b873be68b00b9bd76eade92e397f4dbd4abc: Status 404 returned error can't find the container with id a03a4317ed5c643df44428465943b873be68b00b9bd76eade92e397f4dbd4abc Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.710815 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 25 09:49:10 crc kubenswrapper[4854]: I1125 09:49:10.775357 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 25 09:49:10 crc kubenswrapper[4854]: W1125 09:49:10.784266 4854 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda72bbae8_a7e3_4605_b2b7_f9254913f1e5.slice/crio-f57866628f91f0ecd5e8bdf74e55d455cbb184c4de7262623c98864b80cd9647 WatchSource:0}: Error finding container f57866628f91f0ecd5e8bdf74e55d455cbb184c4de7262623c98864b80cd9647: Status 404 returned error can't find the container with id f57866628f91f0ecd5e8bdf74e55d455cbb184c4de7262623c98864b80cd9647 Nov 25 09:49:11 crc kubenswrapper[4854]: I1125 09:49:11.137556 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"3ac43dd4-dc0c-4974-8521-11073254c3cd","Type":"ContainerStarted","Data":"8380dbedf197624629e2a1c807152ec827a07d4214ac91f39d4c9bae96385090"} Nov 25 09:49:11 crc kubenswrapper[4854]: I1125 09:49:11.139057 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" event={"ID":"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5","Type":"ContainerStarted","Data":"a503137e07fc7337d4f76557ef631883fa46fa8b46cf187fcaea6e73c2584ed1"} Nov 25 09:49:11 crc kubenswrapper[4854]: I1125 09:49:11.140239 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"9578161a-d74b-48f8-9e3a-4ed85b4bb673","Type":"ContainerStarted","Data":"a03a4317ed5c643df44428465943b873be68b00b9bd76eade92e397f4dbd4abc"} Nov 25 09:49:11 crc kubenswrapper[4854]: I1125 09:49:11.141612 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" event={"ID":"44898682-d509-4f9b-b054-cd1df11c603d","Type":"ContainerStarted","Data":"6f1502f845f7bc68750bc47148646164cda80259ccc63dedd6fc14f2dc408f8e"} Nov 25 09:49:11 crc kubenswrapper[4854]: I1125 09:49:11.142830 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"a72bbae8-a7e3-4605-b2b7-f9254913f1e5","Type":"ContainerStarted","Data":"f57866628f91f0ecd5e8bdf74e55d455cbb184c4de7262623c98864b80cd9647"} Nov 25 09:49:11 crc kubenswrapper[4854]: I1125 09:49:11.202143 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4chks" Nov 25 09:49:12 crc kubenswrapper[4854]: I1125 09:49:12.709135 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chks"] Nov 25 09:49:13 crc kubenswrapper[4854]: I1125 09:49:13.156423 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4chks" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="registry-server" containerID="cri-o://07a087d2525fa07b3b466775c1277f94400ea5b7fe2fd8784f7be2647677db1f" gracePeriod=2 Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.175039 4854 generic.go:334] "Generic (PLEG): container finished" podID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerID="07a087d2525fa07b3b466775c1277f94400ea5b7fe2fd8784f7be2647677db1f" exitCode=0 Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.175151 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chks" event={"ID":"6b2a9ddb-2e8a-4b49-bb83-e312418043d9","Type":"ContainerDied","Data":"07a087d2525fa07b3b466775c1277f94400ea5b7fe2fd8784f7be2647677db1f"} Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.175568 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chks" 
event={"ID":"6b2a9ddb-2e8a-4b49-bb83-e312418043d9","Type":"ContainerDied","Data":"83a9c31e8354cecd316173c59830e1e8b30266710ead04e6f04a22595c648066"} Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.175594 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83a9c31e8354cecd316173c59830e1e8b30266710ead04e6f04a22595c648066" Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.299168 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chks" Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.393512 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-catalog-content\") pod \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.393548 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgwgg\" (UniqueName: \"kubernetes.io/projected/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-kube-api-access-mgwgg\") pod \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.393617 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-utilities\") pod \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\" (UID: \"6b2a9ddb-2e8a-4b49-bb83-e312418043d9\") " Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.399525 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-utilities" (OuterVolumeSpecName: "utilities") pod "6b2a9ddb-2e8a-4b49-bb83-e312418043d9" (UID: "6b2a9ddb-2e8a-4b49-bb83-e312418043d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.402638 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-kube-api-access-mgwgg" (OuterVolumeSpecName: "kube-api-access-mgwgg") pod "6b2a9ddb-2e8a-4b49-bb83-e312418043d9" (UID: "6b2a9ddb-2e8a-4b49-bb83-e312418043d9"). InnerVolumeSpecName "kube-api-access-mgwgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.415896 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b2a9ddb-2e8a-4b49-bb83-e312418043d9" (UID: "6b2a9ddb-2e8a-4b49-bb83-e312418043d9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.495358 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.495380 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgwgg\" (UniqueName: \"kubernetes.io/projected/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-kube-api-access-mgwgg\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:14 crc kubenswrapper[4854]: I1125 09:49:14.495394 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2a9ddb-2e8a-4b49-bb83-e312418043d9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.186196 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv" event={"ID":"74f7ae80-fd76-4c73-85e8-a886a203733d","Type":"ContainerStarted","Data":"97a5c87aa6ce59ee7c605229a9d46944a36aeee5eeb1e7e954b7474c07ddb560"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.186356 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.188991 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" event={"ID":"111ecac4-100d-47df-97d7-23f5c048c7d5","Type":"ContainerStarted","Data":"2abd289bfab2b8bb758d334705ec69e6ecf91c254ac366c3488228e7623a06b0"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.189141 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.192139 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" event={"ID":"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5","Type":"ContainerStarted","Data":"0b34a87b278488e7a5c70cb893b98385b237616224150672aaaa1cbda887f35b"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.194342 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" event={"ID":"44898682-d509-4f9b-b054-cd1df11c603d","Type":"ContainerStarted","Data":"b30429e253f36119dbe69da5c003134cc2dd17bad2fdccecd7256f7289263e14"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.205208 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"a72bbae8-a7e3-4605-b2b7-f9254913f1e5","Type":"ContainerStarted","Data":"1199c6bad58d62dc04ca1ba3773b54837e2544134814f7c0185d5fae81439817"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.205993 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.207410 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"3ac43dd4-dc0c-4974-8521-11073254c3cd","Type":"ContainerStarted","Data":"8b0ee6a9e49a95d352b27fcd73f1abc1e58544c70aafd132877a06171c78838a"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.207496 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.209187 4854 generic.go:334] "Generic (PLEG): container finished" podID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerID="29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78" exitCode=0 Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.209232 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9zd2" event={"ID":"b011dfd2-12cc-456d-9106-ed460f6550a3","Type":"ContainerDied","Data":"29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.212031 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"9578161a-d74b-48f8-9e3a-4ed85b4bb673","Type":"ContainerStarted","Data":"e50215ae1b0360f989b470d461ce5a0e1ce2e19e73323d9673b5993088e9ff07"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.212563 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.214310 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chks" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.214397 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5" event={"ID":"d6c5fbf6-e379-49e2-a402-e36d171150dd","Type":"ContainerStarted","Data":"e352f3c2523f08dad05113f63ba27e988532785098cdc6da0bb0a5a8787e2169"} Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.223294 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv" podStartSLOduration=2.294414389 podStartE2EDuration="7.222720518s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:09.111888807 +0000 UTC m=+754.964882183" lastFinishedPulling="2025-11-25 09:49:14.040194936 +0000 UTC m=+759.893188312" observedRunningTime="2025-11-25 09:49:15.220130937 +0000 UTC m=+761.073124323" watchObservedRunningTime="2025-11-25 09:49:15.222720518 +0000 UTC m=+761.075713904" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.242183 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" podStartSLOduration=2.750707016 podStartE2EDuration="7.242152071s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:09.453858382 +0000 UTC m=+755.306851758" lastFinishedPulling="2025-11-25 09:49:13.945303417 +0000 UTC m=+759.798296813" observedRunningTime="2025-11-25 09:49:15.237960115 +0000 UTC m=+761.090953501" watchObservedRunningTime="2025-11-25 09:49:15.242152071 +0000 UTC m=+761.095145447" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.264150 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.947181021 podStartE2EDuration="7.264128883s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:10.721864175 +0000 UTC m=+756.574857541" lastFinishedPulling="2025-11-25 09:49:14.038812027 +0000 UTC m=+759.891805403" observedRunningTime="2025-11-25 09:49:15.258007904 +0000 UTC m=+761.111001290" watchObservedRunningTime="2025-11-25 09:49:15.264128883 
+0000 UTC m=+761.117122259" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.287642 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chks"] Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.294550 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chks"] Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.312989 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5" podStartSLOduration=2.44159112 podStartE2EDuration="7.312968599s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:09.170111132 +0000 UTC m=+755.023104498" lastFinishedPulling="2025-11-25 09:49:14.041488601 +0000 UTC m=+759.894481977" observedRunningTime="2025-11-25 09:49:15.311437877 +0000 UTC m=+761.164431253" watchObservedRunningTime="2025-11-25 09:49:15.312968599 +0000 UTC m=+761.165961975" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.351446 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=4.606580328 podStartE2EDuration="7.351425763s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:10.504136823 +0000 UTC m=+756.357130189" lastFinishedPulling="2025-11-25 09:49:13.248982248 +0000 UTC m=+759.101975624" observedRunningTime="2025-11-25 09:49:15.343229188 +0000 UTC m=+761.196222564" watchObservedRunningTime="2025-11-25 09:49:15.351425763 +0000 UTC m=+761.204419139" Nov 25 09:49:15 crc kubenswrapper[4854]: I1125 09:49:15.370818 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=4.276383055 podStartE2EDuration="7.370800694s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:10.787118932 +0000 UTC m=+756.640112308" lastFinishedPulling="2025-11-25 09:49:13.881536571 +0000 UTC m=+759.734529947" observedRunningTime="2025-11-25 09:49:15.359217315 +0000 UTC m=+761.212210721" watchObservedRunningTime="2025-11-25 09:49:15.370800694 +0000 UTC m=+761.223794070" Nov 25 09:49:16 crc kubenswrapper[4854]: I1125 09:49:16.224588 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9zd2" event={"ID":"b011dfd2-12cc-456d-9106-ed460f6550a3","Type":"ContainerStarted","Data":"70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95"} Nov 25 09:49:16 crc kubenswrapper[4854]: I1125 09:49:16.225516 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5" Nov 25 09:49:17 crc kubenswrapper[4854]: I1125 09:49:17.024255 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" path="/var/lib/kubelet/pods/6b2a9ddb-2e8a-4b49-bb83-e312418043d9/volumes" Nov 25 09:49:19 crc kubenswrapper[4854]: I1125 09:49:19.093175 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:19 crc kubenswrapper[4854]: I1125 09:49:19.093472 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:19 crc kubenswrapper[4854]: I1125 09:49:19.142023 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:19 crc kubenswrapper[4854]: I1125 09:49:19.160823 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t9zd2" podStartSLOduration=5.670089421 podStartE2EDuration="11.16080452s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:10.132178168 +0000 UTC m=+755.985171544" lastFinishedPulling="2025-11-25 09:49:15.622893267 +0000 UTC m=+761.475886643" observedRunningTime="2025-11-25 09:49:16.250981887 +0000 UTC m=+762.103975283" watchObservedRunningTime="2025-11-25 09:49:19.16080452 +0000 UTC m=+765.013797916" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.263969 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" event={"ID":"3aad27bd-694f-4ee7-a0a3-daf16b8d42e5","Type":"ContainerStarted","Data":"d77d7f298721cf2c28255d77863f17a798fcd662408ec07df5476525077e1557"} Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.264699 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.264720 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.266514 4854 patch_prober.go:28] interesting pod/logging-loki-gateway-7bb4c68556-4mv8n container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.76:8083/ready\": dial tcp 10.217.0.76:8083: connect: connection refused" start-of-body= Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.266575 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" podUID="3aad27bd-694f-4ee7-a0a3-daf16b8d42e5" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.76:8083/ready\": dial tcp 10.217.0.76:8083: connect: connection refused" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.268341 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" event={"ID":"44898682-d509-4f9b-b054-cd1df11c603d","Type":"ContainerStarted","Data":"4dc65c9c2368905f2fab5af611d55e133409781028e7b22e11ed18bcf633f813"} Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.269948 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.269972 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.273312 4854 patch_prober.go:28] interesting pod/logging-loki-gateway-7bb4c68556-bj5ps container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.79:8083/ready\": dial tcp 10.217.0.79:8083: connect: connection refused" start-of-body= Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.273353 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" podUID="44898682-d509-4f9b-b054-cd1df11c603d" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.79:8083/ready\": dial tcp 10.217.0.79:8083: connect: connection refused" Nov 25 
09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.281068 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.285709 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.293141 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" podStartSLOduration=2.359704379 podStartE2EDuration="13.293122992s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:10.146097379 +0000 UTC m=+755.999090755" lastFinishedPulling="2025-11-25 09:49:21.079515982 +0000 UTC m=+766.932509368" observedRunningTime="2025-11-25 09:49:21.292111514 +0000 UTC m=+767.145104900" watchObservedRunningTime="2025-11-25 09:49:21.293122992 +0000 UTC m=+767.146116378" Nov 25 09:49:21 crc kubenswrapper[4854]: I1125 09:49:21.320799 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" podStartSLOduration=2.671656311 podStartE2EDuration="13.320777569s" podCreationTimestamp="2025-11-25 09:49:08 +0000 UTC" firstStartedPulling="2025-11-25 09:49:10.432389559 +0000 UTC m=+756.285382935" lastFinishedPulling="2025-11-25 09:49:21.081510817 +0000 UTC m=+766.934504193" observedRunningTime="2025-11-25 09:49:21.317383246 +0000 UTC m=+767.170376632" watchObservedRunningTime="2025-11-25 09:49:21.320777569 +0000 UTC m=+767.173770935" Nov 25 09:49:22 crc kubenswrapper[4854]: I1125 09:49:22.282723 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7bb4c68556-4mv8n" Nov 25 09:49:22 crc kubenswrapper[4854]: I1125 09:49:22.285231 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7bb4c68556-bj5ps" Nov 25 09:49:25 crc kubenswrapper[4854]: I1125 09:49:25.029478 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:49:25 crc kubenswrapper[4854]: I1125 09:49:25.029852 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.198791 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.238385 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t9zd2"] Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.327307 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t9zd2" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerName="registry-server" containerID="cri-o://70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95" gracePeriod=2 Nov 
25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.713038 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.905999 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-utilities\") pod \"b011dfd2-12cc-456d-9106-ed460f6550a3\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.906169 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwxlh\" (UniqueName: \"kubernetes.io/projected/b011dfd2-12cc-456d-9106-ed460f6550a3-kube-api-access-jwxlh\") pod \"b011dfd2-12cc-456d-9106-ed460f6550a3\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.906239 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-catalog-content\") pod \"b011dfd2-12cc-456d-9106-ed460f6550a3\" (UID: \"b011dfd2-12cc-456d-9106-ed460f6550a3\") " Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.907749 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-utilities" (OuterVolumeSpecName: "utilities") pod "b011dfd2-12cc-456d-9106-ed460f6550a3" (UID: "b011dfd2-12cc-456d-9106-ed460f6550a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.912367 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b011dfd2-12cc-456d-9106-ed460f6550a3-kube-api-access-jwxlh" (OuterVolumeSpecName: "kube-api-access-jwxlh") pod "b011dfd2-12cc-456d-9106-ed460f6550a3" (UID: "b011dfd2-12cc-456d-9106-ed460f6550a3"). InnerVolumeSpecName "kube-api-access-jwxlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:29 crc kubenswrapper[4854]: I1125 09:49:29.966629 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b011dfd2-12cc-456d-9106-ed460f6550a3" (UID: "b011dfd2-12cc-456d-9106-ed460f6550a3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.008033 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.008266 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwxlh\" (UniqueName: \"kubernetes.io/projected/b011dfd2-12cc-456d-9106-ed460f6550a3-kube-api-access-jwxlh\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.008344 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b011dfd2-12cc-456d-9106-ed460f6550a3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.042409 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.174471 4854 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.174522 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="3ac43dd4-dc0c-4974-8521-11073254c3cd" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.182797 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.335142 4854 generic.go:334] "Generic (PLEG): container finished" podID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerID="70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95" exitCode=0 Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.335193 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9zd2" event={"ID":"b011dfd2-12cc-456d-9106-ed460f6550a3","Type":"ContainerDied","Data":"70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95"} Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.335231 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9zd2" event={"ID":"b011dfd2-12cc-456d-9106-ed460f6550a3","Type":"ContainerDied","Data":"be39c26813e5025fb4b129922361ea847a7f27cc8dded7f9cc7d13f961709525"} Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.335250 4854 scope.go:117] "RemoveContainer" containerID="70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.335202 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t9zd2" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.360365 4854 scope.go:117] "RemoveContainer" containerID="29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.366598 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t9zd2"] Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.371866 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t9zd2"] Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.400254 4854 scope.go:117] "RemoveContainer" containerID="def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.417560 4854 scope.go:117] "RemoveContainer" containerID="70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95" Nov 25 09:49:30 crc kubenswrapper[4854]: E1125 09:49:30.417989 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95\": container with ID starting with 70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95 not found: ID does not exist" containerID="70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.418074 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95"} err="failed to get container status \"70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95\": rpc error: code = NotFound desc = could not find container \"70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95\": container with ID starting with 70ffd606e653f15cd5e396d3ef6b4bdaadb4c4f197c53be6c27b3cf86da16c95 not found: ID does not exist" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.418148 4854 scope.go:117] "RemoveContainer" containerID="29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78" Nov 25 09:49:30 crc kubenswrapper[4854]: E1125 09:49:30.418653 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78\": container with ID starting with 29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78 not found: ID does not exist" containerID="29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.418722 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78"} err="failed to get container status \"29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78\": rpc error: code = NotFound desc = could not find container \"29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78\": container with ID starting with 29001e7a8c575de9abd5123a3f7b440c66222be710fcd2dac8ed666e8c893c78 not found: ID does not exist" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.418748 4854 scope.go:117] "RemoveContainer" containerID="def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f" Nov 25 09:49:30 crc kubenswrapper[4854]: E1125 09:49:30.419058 4854 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f\": container with ID starting with def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f not found: ID does not exist" containerID="def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f" Nov 25 09:49:30 crc kubenswrapper[4854]: I1125 09:49:30.419084 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f"} err="failed to get container status \"def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f\": rpc error: code = NotFound desc = could not find container \"def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f\": container with ID starting with def2a3e1d156b41014da2d48fc168a2f4b8002f3e8f097eeeb479d3e5023e37f not found: ID does not exist" Nov 25 09:49:31 crc kubenswrapper[4854]: I1125 09:49:31.024331 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" path="/var/lib/kubelet/pods/b011dfd2-12cc-456d-9106-ed460f6550a3/volumes" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.650585 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zbbr7"] Nov 25 09:49:33 crc kubenswrapper[4854]: E1125 09:49:33.651412 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="extract-utilities" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651428 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="extract-utilities" Nov 25 09:49:33 crc kubenswrapper[4854]: E1125 09:49:33.651446 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="extract-content" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651456 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="extract-content" Nov 25 09:49:33 crc kubenswrapper[4854]: E1125 09:49:33.651467 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerName="registry-server" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651475 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerName="registry-server" Nov 25 09:49:33 crc kubenswrapper[4854]: E1125 09:49:33.651491 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="registry-server" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651499 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="registry-server" Nov 25 09:49:33 crc kubenswrapper[4854]: E1125 09:49:33.651511 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerName="extract-content" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651519 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerName="extract-content" Nov 25 09:49:33 crc kubenswrapper[4854]: E1125 09:49:33.651546 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" 
containerName="extract-utilities" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651553 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerName="extract-utilities" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651796 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b2a9ddb-2e8a-4b49-bb83-e312418043d9" containerName="registry-server" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.651821 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b011dfd2-12cc-456d-9106-ed460f6550a3" containerName="registry-server" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.653083 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.669482 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zbbr7"] Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.766225 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-utilities\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.766294 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-catalog-content\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.766458 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rldxg\" (UniqueName: \"kubernetes.io/projected/de687d17-1331-4b02-aaae-708d495a7e82-kube-api-access-rldxg\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.868116 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rldxg\" (UniqueName: \"kubernetes.io/projected/de687d17-1331-4b02-aaae-708d495a7e82-kube-api-access-rldxg\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.868276 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-utilities\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.868311 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-catalog-content\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.868854 4854 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-utilities\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.868896 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-catalog-content\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.893047 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rldxg\" (UniqueName: \"kubernetes.io/projected/de687d17-1331-4b02-aaae-708d495a7e82-kube-api-access-rldxg\") pod \"certified-operators-zbbr7\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:33 crc kubenswrapper[4854]: I1125 09:49:33.989122 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:34 crc kubenswrapper[4854]: I1125 09:49:34.469400 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zbbr7"] Nov 25 09:49:35 crc kubenswrapper[4854]: I1125 09:49:35.396685 4854 generic.go:334] "Generic (PLEG): container finished" podID="de687d17-1331-4b02-aaae-708d495a7e82" containerID="ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de" exitCode=0 Nov 25 09:49:35 crc kubenswrapper[4854]: I1125 09:49:35.396760 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zbbr7" event={"ID":"de687d17-1331-4b02-aaae-708d495a7e82","Type":"ContainerDied","Data":"ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de"} Nov 25 09:49:35 crc kubenswrapper[4854]: I1125 09:49:35.397134 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zbbr7" event={"ID":"de687d17-1331-4b02-aaae-708d495a7e82","Type":"ContainerStarted","Data":"99b4a3f21009072785ba0a0cf77d983c0ba0f23a2af3054b5ba6149bf602ef94"} Nov 25 09:49:36 crc kubenswrapper[4854]: I1125 09:49:36.406021 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zbbr7" event={"ID":"de687d17-1331-4b02-aaae-708d495a7e82","Type":"ContainerStarted","Data":"a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531"} Nov 25 09:49:37 crc kubenswrapper[4854]: I1125 09:49:37.413993 4854 generic.go:334] "Generic (PLEG): container finished" podID="de687d17-1331-4b02-aaae-708d495a7e82" containerID="a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531" exitCode=0 Nov 25 09:49:37 crc kubenswrapper[4854]: I1125 09:49:37.414282 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zbbr7" event={"ID":"de687d17-1331-4b02-aaae-708d495a7e82","Type":"ContainerDied","Data":"a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531"} Nov 25 09:49:38 crc kubenswrapper[4854]: I1125 09:49:38.424726 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zbbr7" event={"ID":"de687d17-1331-4b02-aaae-708d495a7e82","Type":"ContainerStarted","Data":"1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15"} Nov 25 
09:49:38 crc kubenswrapper[4854]: I1125 09:49:38.448828 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zbbr7" podStartSLOduration=3.000972396 podStartE2EDuration="5.448804787s" podCreationTimestamp="2025-11-25 09:49:33 +0000 UTC" firstStartedPulling="2025-11-25 09:49:35.399115354 +0000 UTC m=+781.252108730" lastFinishedPulling="2025-11-25 09:49:37.846947745 +0000 UTC m=+783.699941121" observedRunningTime="2025-11-25 09:49:38.442903735 +0000 UTC m=+784.295897151" watchObservedRunningTime="2025-11-25 09:49:38.448804787 +0000 UTC m=+784.301798173" Nov 25 09:49:38 crc kubenswrapper[4854]: I1125 09:49:38.646365 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-9jxqv" Nov 25 09:49:38 crc kubenswrapper[4854]: I1125 09:49:38.840154 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-dzsk5" Nov 25 09:49:38 crc kubenswrapper[4854]: I1125 09:49:38.966784 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-9xgzz" Nov 25 09:49:40 crc kubenswrapper[4854]: I1125 09:49:40.177485 4854 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Nov 25 09:49:40 crc kubenswrapper[4854]: I1125 09:49:40.177882 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="3ac43dd4-dc0c-4974-8521-11073254c3cd" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 09:49:43 crc kubenswrapper[4854]: I1125 09:49:43.989462 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:43 crc kubenswrapper[4854]: I1125 09:49:43.990486 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:44 crc kubenswrapper[4854]: I1125 09:49:44.059281 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:44 crc kubenswrapper[4854]: I1125 09:49:44.532530 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:44 crc kubenswrapper[4854]: I1125 09:49:44.575008 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zbbr7"] Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.485083 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zbbr7" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="registry-server" containerID="cri-o://1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15" gracePeriod=2 Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.830759 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.886502 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-catalog-content\") pod \"de687d17-1331-4b02-aaae-708d495a7e82\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.886597 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rldxg\" (UniqueName: \"kubernetes.io/projected/de687d17-1331-4b02-aaae-708d495a7e82-kube-api-access-rldxg\") pod \"de687d17-1331-4b02-aaae-708d495a7e82\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.886616 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-utilities\") pod \"de687d17-1331-4b02-aaae-708d495a7e82\" (UID: \"de687d17-1331-4b02-aaae-708d495a7e82\") " Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.887759 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-utilities" (OuterVolumeSpecName: "utilities") pod "de687d17-1331-4b02-aaae-708d495a7e82" (UID: "de687d17-1331-4b02-aaae-708d495a7e82"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.892482 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de687d17-1331-4b02-aaae-708d495a7e82-kube-api-access-rldxg" (OuterVolumeSpecName: "kube-api-access-rldxg") pod "de687d17-1331-4b02-aaae-708d495a7e82" (UID: "de687d17-1331-4b02-aaae-708d495a7e82"). InnerVolumeSpecName "kube-api-access-rldxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.936877 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de687d17-1331-4b02-aaae-708d495a7e82" (UID: "de687d17-1331-4b02-aaae-708d495a7e82"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.988523 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rldxg\" (UniqueName: \"kubernetes.io/projected/de687d17-1331-4b02-aaae-708d495a7e82-kube-api-access-rldxg\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.988575 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:46 crc kubenswrapper[4854]: I1125 09:49:46.988588 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de687d17-1331-4b02-aaae-708d495a7e82-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.496249 4854 generic.go:334] "Generic (PLEG): container finished" podID="de687d17-1331-4b02-aaae-708d495a7e82" containerID="1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15" exitCode=0 Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.496333 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zbbr7" event={"ID":"de687d17-1331-4b02-aaae-708d495a7e82","Type":"ContainerDied","Data":"1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15"} Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.496634 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zbbr7" event={"ID":"de687d17-1331-4b02-aaae-708d495a7e82","Type":"ContainerDied","Data":"99b4a3f21009072785ba0a0cf77d983c0ba0f23a2af3054b5ba6149bf602ef94"} Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.496365 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zbbr7" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.496662 4854 scope.go:117] "RemoveContainer" containerID="1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.521483 4854 scope.go:117] "RemoveContainer" containerID="a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.540124 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zbbr7"] Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.541572 4854 scope.go:117] "RemoveContainer" containerID="ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.545826 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zbbr7"] Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.565800 4854 scope.go:117] "RemoveContainer" containerID="1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15" Nov 25 09:49:47 crc kubenswrapper[4854]: E1125 09:49:47.566327 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15\": container with ID starting with 1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15 not found: ID does not exist" containerID="1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.566369 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15"} err="failed to get container status \"1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15\": rpc error: code = NotFound desc = could not find container \"1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15\": container with ID starting with 1f729a049238a61a7a15f9e26e8962c6dccca42d9ad698efec54fce26b8f9c15 not found: ID does not exist" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.566397 4854 scope.go:117] "RemoveContainer" containerID="a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531" Nov 25 09:49:47 crc kubenswrapper[4854]: E1125 09:49:47.566760 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531\": container with ID starting with a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531 not found: ID does not exist" containerID="a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.566826 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531"} err="failed to get container status \"a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531\": rpc error: code = NotFound desc = could not find container \"a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531\": container with ID starting with a6a594960fbf7957e30eb2f5a28686430c77094610e2773362801127f8cae531 not found: ID does not exist" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.566877 4854 scope.go:117] "RemoveContainer" 
containerID="ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de" Nov 25 09:49:47 crc kubenswrapper[4854]: E1125 09:49:47.567184 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de\": container with ID starting with ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de not found: ID does not exist" containerID="ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de" Nov 25 09:49:47 crc kubenswrapper[4854]: I1125 09:49:47.567234 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de"} err="failed to get container status \"ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de\": rpc error: code = NotFound desc = could not find container \"ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de\": container with ID starting with ed933d93c6902e952335d517d68a0fa181bf00bab614d8a8f996291745f273de not found: ID does not exist" Nov 25 09:49:49 crc kubenswrapper[4854]: I1125 09:49:49.021589 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de687d17-1331-4b02-aaae-708d495a7e82" path="/var/lib/kubelet/pods/de687d17-1331-4b02-aaae-708d495a7e82/volumes" Nov 25 09:49:50 crc kubenswrapper[4854]: I1125 09:49:50.173155 4854 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Nov 25 09:49:50 crc kubenswrapper[4854]: I1125 09:49:50.173214 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="3ac43dd4-dc0c-4974-8521-11073254c3cd" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 09:49:55 crc kubenswrapper[4854]: I1125 09:49:55.029161 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:49:55 crc kubenswrapper[4854]: I1125 09:49:55.029856 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:50:00 crc kubenswrapper[4854]: I1125 09:50:00.176124 4854 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Nov 25 09:50:00 crc kubenswrapper[4854]: I1125 09:50:00.176716 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="3ac43dd4-dc0c-4974-8521-11073254c3cd" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 09:50:10 crc kubenswrapper[4854]: I1125 09:50:10.176221 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.029167 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.029895 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.029948 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.030959 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b86f8830130949aa485656adb170193fb5c4c66ab6f65d45cd6ab7997ce2f21"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.031084 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://9b86f8830130949aa485656adb170193fb5c4c66ab6f65d45cd6ab7997ce2f21" gracePeriod=600 Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.797969 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="9b86f8830130949aa485656adb170193fb5c4c66ab6f65d45cd6ab7997ce2f21" exitCode=0 Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.798023 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"9b86f8830130949aa485656adb170193fb5c4c66ab6f65d45cd6ab7997ce2f21"} Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.798599 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"5e14519da63f04cdbdb7f55713d2722df29a5332d12866e5327a4659d36c5bcf"} Nov 25 09:50:25 crc kubenswrapper[4854]: I1125 09:50:25.798646 4854 scope.go:117] "RemoveContainer" containerID="c31858ec557866c59816aa8a24974a2c5810f8450caa75c3ab5f3a290cd6674c" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.933328 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-nglbt"] Nov 25 09:50:28 crc kubenswrapper[4854]: E1125 09:50:28.934317 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="extract-utilities" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.934331 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="extract-utilities" Nov 25 09:50:28 crc kubenswrapper[4854]: E1125 
09:50:28.934347 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="extract-content" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.934353 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="extract-content" Nov 25 09:50:28 crc kubenswrapper[4854]: E1125 09:50:28.934366 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="registry-server" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.934372 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="registry-server" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.934517 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="de687d17-1331-4b02-aaae-708d495a7e82" containerName="registry-server" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.935045 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-nglbt" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.936336 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.936721 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.937030 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.939103 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-8tmlk" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.939602 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.948288 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 25 09:50:28 crc kubenswrapper[4854]: I1125 09:50:28.960640 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-nglbt"] Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.009166 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-nglbt"] Nov 25 09:50:29 crc kubenswrapper[4854]: E1125 09:50:29.010146 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-strkx metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-nglbt" podUID="7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076093 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-sa-token\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076357 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-trusted-ca\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076430 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-token\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076506 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-strkx\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-kube-api-access-strkx\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076595 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-datadir\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076730 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076810 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-syslog-receiver\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076887 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-metrics\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.076995 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-entrypoint\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.077064 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config-openshift-service-cacrt\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.077133 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: 
\"kubernetes.io/empty-dir/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-tmp\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.178775 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-metrics\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.178901 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-entrypoint\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.178932 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config-openshift-service-cacrt\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.178958 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-tmp\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179015 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-sa-token\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179048 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-trusted-ca\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179070 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-token\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179117 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-strkx\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-kube-api-access-strkx\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179162 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-datadir\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179187 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179216 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-syslog-receiver\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.179904 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-datadir\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.181420 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.181637 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-entrypoint\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.182252 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config-openshift-service-cacrt\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.182972 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-trusted-ca\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.187962 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-syslog-receiver\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.188450 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-token\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.191628 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-tmp\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " 
pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.196289 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-metrics\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.197079 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-strkx\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-kube-api-access-strkx\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.198115 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-sa-token\") pod \"collector-nglbt\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.826368 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.841076 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-nglbt" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.992790 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-strkx\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-kube-api-access-strkx\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.992889 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-token\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.992933 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-sa-token\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.992984 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-metrics\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.993059 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-entrypoint\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.993104 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-trusted-ca\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: 
\"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.993210 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.993255 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-datadir\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.993315 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-tmp\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.993367 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-syslog-receiver\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.993400 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config-openshift-service-cacrt\") pod \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\" (UID: \"7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1\") " Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.994344 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.994339 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config" (OuterVolumeSpecName: "config") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.994471 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-datadir" (OuterVolumeSpecName: "datadir") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.994468 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "config-openshift-service-cacrt". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.994482 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.997203 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-tmp" (OuterVolumeSpecName: "tmp") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.997963 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.998800 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-metrics" (OuterVolumeSpecName: "metrics") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.998880 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-token" (OuterVolumeSpecName: "collector-token") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.999386 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-kube-api-access-strkx" (OuterVolumeSpecName: "kube-api-access-strkx") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "kube-api-access-strkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:29 crc kubenswrapper[4854]: I1125 09:50:29.999421 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-sa-token" (OuterVolumeSpecName: "sa-token") pod "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" (UID: "7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1"). InnerVolumeSpecName "sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095599 4854 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095642 4854 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095660 4854 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095699 4854 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-entrypoint\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095711 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095723 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095735 4854 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-datadir\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095746 4854 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-tmp\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095759 4854 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095775 4854 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.095791 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-strkx\" (UniqueName: \"kubernetes.io/projected/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1-kube-api-access-strkx\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.833554 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-nglbt" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.901616 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-nglbt"] Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.910861 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-nglbt"] Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.923050 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-wg25b"] Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.924287 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-wg25b" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.926286 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.927224 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-8tmlk" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.928487 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.930188 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.930218 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.938134 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-wg25b"] Nov 25 09:50:30 crc kubenswrapper[4854]: I1125 09:50:30.938648 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.012386 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-collector-syslog-receiver\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.012493 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/00b58efa-c7d1-4ccb-93e1-15e8813f9074-sa-token\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.012533 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-config\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.012624 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-config-openshift-service-cacrt\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 
09:50:31.013631 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82qxf\" (UniqueName: \"kubernetes.io/projected/00b58efa-c7d1-4ccb-93e1-15e8813f9074-kube-api-access-82qxf\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.013712 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-entrypoint\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.013775 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-metrics\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.013808 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-trusted-ca\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.013843 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-collector-token\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.013872 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/00b58efa-c7d1-4ccb-93e1-15e8813f9074-datadir\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.013918 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/00b58efa-c7d1-4ccb-93e1-15e8813f9074-tmp\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.022768 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1" path="/var/lib/kubelet/pods/7cd26fc5-5af3-4b32-af5e-39fc0a6c31d1/volumes" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.115723 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-metrics\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116504 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-trusted-ca\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " 
pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116588 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-collector-token\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116622 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/00b58efa-c7d1-4ccb-93e1-15e8813f9074-datadir\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116767 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/00b58efa-c7d1-4ccb-93e1-15e8813f9074-tmp\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116834 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-collector-syslog-receiver\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116866 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/00b58efa-c7d1-4ccb-93e1-15e8813f9074-sa-token\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116903 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-config\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.116958 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/00b58efa-c7d1-4ccb-93e1-15e8813f9074-datadir\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.117037 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-config-openshift-service-cacrt\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.117121 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82qxf\" (UniqueName: \"kubernetes.io/projected/00b58efa-c7d1-4ccb-93e1-15e8813f9074-kube-api-access-82qxf\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.117168 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: 
\"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-entrypoint\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.118408 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-config-openshift-service-cacrt\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.118617 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-config\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.119286 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-entrypoint\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.118509 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/00b58efa-c7d1-4ccb-93e1-15e8813f9074-trusted-ca\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.121248 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-metrics\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.121323 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/00b58efa-c7d1-4ccb-93e1-15e8813f9074-tmp\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.123139 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-collector-token\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.123758 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/00b58efa-c7d1-4ccb-93e1-15e8813f9074-collector-syslog-receiver\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.160226 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/00b58efa-c7d1-4ccb-93e1-15e8813f9074-sa-token\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.161807 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-82qxf\" (UniqueName: \"kubernetes.io/projected/00b58efa-c7d1-4ccb-93e1-15e8813f9074-kube-api-access-82qxf\") pod \"collector-wg25b\" (UID: \"00b58efa-c7d1-4ccb-93e1-15e8813f9074\") " pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.272809 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-wg25b" Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.681024 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-wg25b"] Nov 25 09:50:31 crc kubenswrapper[4854]: W1125 09:50:31.688493 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00b58efa_c7d1_4ccb_93e1_15e8813f9074.slice/crio-4396935057d656d899f6777756ede98930932c2df3e3bdca4553cb32028cf36e WatchSource:0}: Error finding container 4396935057d656d899f6777756ede98930932c2df3e3bdca4553cb32028cf36e: Status 404 returned error can't find the container with id 4396935057d656d899f6777756ede98930932c2df3e3bdca4553cb32028cf36e Nov 25 09:50:31 crc kubenswrapper[4854]: I1125 09:50:31.842250 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-wg25b" event={"ID":"00b58efa-c7d1-4ccb-93e1-15e8813f9074","Type":"ContainerStarted","Data":"4396935057d656d899f6777756ede98930932c2df3e3bdca4553cb32028cf36e"} Nov 25 09:50:38 crc kubenswrapper[4854]: I1125 09:50:38.900695 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-wg25b" event={"ID":"00b58efa-c7d1-4ccb-93e1-15e8813f9074","Type":"ContainerStarted","Data":"88df03ced09831b31d4b16251c73c729bcbb0b0128b5db885e51e7ffd555db80"} Nov 25 09:50:38 crc kubenswrapper[4854]: I1125 09:50:38.932755 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-wg25b" podStartSLOduration=2.054956997 podStartE2EDuration="8.93273307s" podCreationTimestamp="2025-11-25 09:50:30 +0000 UTC" firstStartedPulling="2025-11-25 09:50:31.690475906 +0000 UTC m=+837.543469322" lastFinishedPulling="2025-11-25 09:50:38.568252009 +0000 UTC m=+844.421245395" observedRunningTime="2025-11-25 09:50:38.927120596 +0000 UTC m=+844.780113982" watchObservedRunningTime="2025-11-25 09:50:38.93273307 +0000 UTC m=+844.785726446" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.450767 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x"] Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.452885 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.454743 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.464135 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x"] Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.618941 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qcmh\" (UniqueName: \"kubernetes.io/projected/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-kube-api-access-2qcmh\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.619005 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.619107 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.720325 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.720646 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.720953 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.720978 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qcmh\" (UniqueName: 
\"kubernetes.io/projected/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-kube-api-access-2qcmh\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.721161 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.739280 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qcmh\" (UniqueName: \"kubernetes.io/projected/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-kube-api-access-2qcmh\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:12 crc kubenswrapper[4854]: I1125 09:51:12.771838 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:13 crc kubenswrapper[4854]: I1125 09:51:13.164264 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x"] Nov 25 09:51:14 crc kubenswrapper[4854]: I1125 09:51:14.174047 4854 generic.go:334] "Generic (PLEG): container finished" podID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerID="3651052fedaddd6449ca1604e6ca857648bcb6faa289a9f6c036f0ce8520b658" exitCode=0 Nov 25 09:51:14 crc kubenswrapper[4854]: I1125 09:51:14.174243 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" event={"ID":"17f88648-1c7f-409d-9f9c-9dad6ddfd03f","Type":"ContainerDied","Data":"3651052fedaddd6449ca1604e6ca857648bcb6faa289a9f6c036f0ce8520b658"} Nov 25 09:51:14 crc kubenswrapper[4854]: I1125 09:51:14.174314 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" event={"ID":"17f88648-1c7f-409d-9f9c-9dad6ddfd03f","Type":"ContainerStarted","Data":"5f1d4a66f36c5452c5f60311215b39c2ec75b289e67a8700af0bef0d05003054"} Nov 25 09:51:16 crc kubenswrapper[4854]: I1125 09:51:16.188942 4854 generic.go:334] "Generic (PLEG): container finished" podID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerID="6c531a0ebc34ece0aab6ef3e59af82a731f03801080f7d128396933d84bde797" exitCode=0 Nov 25 09:51:16 crc kubenswrapper[4854]: I1125 09:51:16.189128 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" event={"ID":"17f88648-1c7f-409d-9f9c-9dad6ddfd03f","Type":"ContainerDied","Data":"6c531a0ebc34ece0aab6ef3e59af82a731f03801080f7d128396933d84bde797"} Nov 25 09:51:17 crc kubenswrapper[4854]: I1125 09:51:17.207791 4854 generic.go:334] "Generic (PLEG): container finished" podID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerID="70df3388a4e278c2d4e1240ef882c4b4c2f75829cfea74fec2e8693ee683fb20" exitCode=0 Nov 25 09:51:17 crc kubenswrapper[4854]: 
I1125 09:51:17.207914 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" event={"ID":"17f88648-1c7f-409d-9f9c-9dad6ddfd03f","Type":"ContainerDied","Data":"70df3388a4e278c2d4e1240ef882c4b4c2f75829cfea74fec2e8693ee683fb20"} Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.510565 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.709565 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-bundle\") pod \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.709686 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qcmh\" (UniqueName: \"kubernetes.io/projected/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-kube-api-access-2qcmh\") pod \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.709714 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-util\") pod \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\" (UID: \"17f88648-1c7f-409d-9f9c-9dad6ddfd03f\") " Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.710300 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-bundle" (OuterVolumeSpecName: "bundle") pod "17f88648-1c7f-409d-9f9c-9dad6ddfd03f" (UID: "17f88648-1c7f-409d-9f9c-9dad6ddfd03f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.717856 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-kube-api-access-2qcmh" (OuterVolumeSpecName: "kube-api-access-2qcmh") pod "17f88648-1c7f-409d-9f9c-9dad6ddfd03f" (UID: "17f88648-1c7f-409d-9f9c-9dad6ddfd03f"). InnerVolumeSpecName "kube-api-access-2qcmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.725625 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-util" (OuterVolumeSpecName: "util") pod "17f88648-1c7f-409d-9f9c-9dad6ddfd03f" (UID: "17f88648-1c7f-409d-9f9c-9dad6ddfd03f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.812217 4854 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.812258 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qcmh\" (UniqueName: \"kubernetes.io/projected/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-kube-api-access-2qcmh\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:18 crc kubenswrapper[4854]: I1125 09:51:18.812270 4854 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17f88648-1c7f-409d-9f9c-9dad6ddfd03f-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:19 crc kubenswrapper[4854]: I1125 09:51:19.221790 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" event={"ID":"17f88648-1c7f-409d-9f9c-9dad6ddfd03f","Type":"ContainerDied","Data":"5f1d4a66f36c5452c5f60311215b39c2ec75b289e67a8700af0bef0d05003054"} Nov 25 09:51:19 crc kubenswrapper[4854]: I1125 09:51:19.221826 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f1d4a66f36c5452c5f60311215b39c2ec75b289e67a8700af0bef0d05003054" Nov 25 09:51:19 crc kubenswrapper[4854]: I1125 09:51:19.222075 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.711314 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-f8dcl"] Nov 25 09:51:21 crc kubenswrapper[4854]: E1125 09:51:21.711949 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerName="util" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.711965 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerName="util" Nov 25 09:51:21 crc kubenswrapper[4854]: E1125 09:51:21.711984 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerName="pull" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.711992 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerName="pull" Nov 25 09:51:21 crc kubenswrapper[4854]: E1125 09:51:21.712014 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerName="extract" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.712022 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerName="extract" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.712191 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="17f88648-1c7f-409d-9f9c-9dad6ddfd03f" containerName="extract" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.712828 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.719174 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-wq5jc" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.719202 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.719317 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.732202 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-f8dcl"] Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.867115 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd8nb\" (UniqueName: \"kubernetes.io/projected/c54d3a2c-5361-4407-bd50-70678088146a-kube-api-access-gd8nb\") pod \"nmstate-operator-557fdffb88-f8dcl\" (UID: \"c54d3a2c-5361-4407-bd50-70678088146a\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.969010 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd8nb\" (UniqueName: \"kubernetes.io/projected/c54d3a2c-5361-4407-bd50-70678088146a-kube-api-access-gd8nb\") pod \"nmstate-operator-557fdffb88-f8dcl\" (UID: \"c54d3a2c-5361-4407-bd50-70678088146a\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" Nov 25 09:51:21 crc kubenswrapper[4854]: I1125 09:51:21.995971 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd8nb\" (UniqueName: \"kubernetes.io/projected/c54d3a2c-5361-4407-bd50-70678088146a-kube-api-access-gd8nb\") pod \"nmstate-operator-557fdffb88-f8dcl\" (UID: \"c54d3a2c-5361-4407-bd50-70678088146a\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" Nov 25 09:51:22 crc kubenswrapper[4854]: I1125 09:51:22.031591 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" Nov 25 09:51:22 crc kubenswrapper[4854]: I1125 09:51:22.472608 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-f8dcl"] Nov 25 09:51:23 crc kubenswrapper[4854]: I1125 09:51:23.249987 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" event={"ID":"c54d3a2c-5361-4407-bd50-70678088146a","Type":"ContainerStarted","Data":"1e7f8d0f06d046292f53e1b84bd2be0e42bd7f37b8ed6da2e0b27ab31baf1414"} Nov 25 09:51:26 crc kubenswrapper[4854]: I1125 09:51:26.274811 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" event={"ID":"c54d3a2c-5361-4407-bd50-70678088146a","Type":"ContainerStarted","Data":"04654252e4c20799d766e7071cf6ec4349e2e54d4af6ba0f7566796d390c8225"} Nov 25 09:51:26 crc kubenswrapper[4854]: I1125 09:51:26.334743 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-f8dcl" podStartSLOduration=2.379556508 podStartE2EDuration="5.334726704s" podCreationTimestamp="2025-11-25 09:51:21 +0000 UTC" firstStartedPulling="2025-11-25 09:51:22.494392409 +0000 UTC m=+888.347385785" lastFinishedPulling="2025-11-25 09:51:25.449562605 +0000 UTC m=+891.302555981" observedRunningTime="2025-11-25 09:51:26.334009154 +0000 UTC m=+892.187002530" watchObservedRunningTime="2025-11-25 09:51:26.334726704 +0000 UTC m=+892.187720080" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.347265 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.351117 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.353657 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-75ps6" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.363579 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.364542 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.370342 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.371223 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.382819 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.401880 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-xq8dt"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.402920 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.456046 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72cdg\" (UniqueName: \"kubernetes.io/projected/459bf834-bbae-4e88-812d-0f7f4f2560f5-kube-api-access-72cdg\") pod \"nmstate-metrics-5dcf9c57c5-8xhgf\" (UID: \"459bf834-bbae-4e88-812d-0f7f4f2560f5\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.456108 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfvfb\" (UniqueName: \"kubernetes.io/projected/f1e621b9-4a85-435a-af8b-2e17f4e74bef-kube-api-access-dfvfb\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.456146 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.558073 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-dbus-socket\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.558241 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72cdg\" (UniqueName: \"kubernetes.io/projected/459bf834-bbae-4e88-812d-0f7f4f2560f5-kube-api-access-72cdg\") pod \"nmstate-metrics-5dcf9c57c5-8xhgf\" (UID: \"459bf834-bbae-4e88-812d-0f7f4f2560f5\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.558263 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfvfb\" (UniqueName: \"kubernetes.io/projected/f1e621b9-4a85-435a-af8b-2e17f4e74bef-kube-api-access-dfvfb\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.558286 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.558330 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-ovs-socket\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.558364 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-jgf2x\" (UniqueName: \"kubernetes.io/projected/22559aba-4245-4836-9ea5-9edad39725c5-kube-api-access-jgf2x\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.558417 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-nmstate-lock\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: E1125 09:51:27.558428 4854 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 25 09:51:27 crc kubenswrapper[4854]: E1125 09:51:27.558505 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair podName:f1e621b9-4a85-435a-af8b-2e17f4e74bef nodeName:}" failed. No retries permitted until 2025-11-25 09:51:28.058485682 +0000 UTC m=+893.911479148 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair") pod "nmstate-webhook-6b89b748d8-8hkpp" (UID: "f1e621b9-4a85-435a-af8b-2e17f4e74bef") : secret "openshift-nmstate-webhook" not found Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.561322 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.562249 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.570414 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.570531 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.571373 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-mqv9f" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.590868 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72cdg\" (UniqueName: \"kubernetes.io/projected/459bf834-bbae-4e88-812d-0f7f4f2560f5-kube-api-access-72cdg\") pod \"nmstate-metrics-5dcf9c57c5-8xhgf\" (UID: \"459bf834-bbae-4e88-812d-0f7f4f2560f5\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.594394 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfvfb\" (UniqueName: \"kubernetes.io/projected/f1e621b9-4a85-435a-af8b-2e17f4e74bef-kube-api-access-dfvfb\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.604420 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660227 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-nmstate-lock\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660271 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-dbus-socket\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660372 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/772f9cbd-1f86-4502-b501-bb781c0f11fe-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660405 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glbrf\" (UniqueName: \"kubernetes.io/projected/772f9cbd-1f86-4502-b501-bb781c0f11fe-kube-api-access-glbrf\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660464 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-ovs-socket\") pod \"nmstate-handler-xq8dt\" (UID: 
\"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660510 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgf2x\" (UniqueName: \"kubernetes.io/projected/22559aba-4245-4836-9ea5-9edad39725c5-kube-api-access-jgf2x\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660538 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/772f9cbd-1f86-4502-b501-bb781c0f11fe-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660659 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-nmstate-lock\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.660977 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-dbus-socket\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.661028 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/22559aba-4245-4836-9ea5-9edad39725c5-ovs-socket\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.671957 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.691342 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgf2x\" (UniqueName: \"kubernetes.io/projected/22559aba-4245-4836-9ea5-9edad39725c5-kube-api-access-jgf2x\") pod \"nmstate-handler-xq8dt\" (UID: \"22559aba-4245-4836-9ea5-9edad39725c5\") " pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.722137 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.762400 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/772f9cbd-1f86-4502-b501-bb781c0f11fe-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.762466 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glbrf\" (UniqueName: \"kubernetes.io/projected/772f9cbd-1f86-4502-b501-bb781c0f11fe-kube-api-access-glbrf\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.762549 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/772f9cbd-1f86-4502-b501-bb781c0f11fe-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.763584 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/772f9cbd-1f86-4502-b501-bb781c0f11fe-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: E1125 09:51:27.763696 4854 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 25 09:51:27 crc kubenswrapper[4854]: E1125 09:51:27.763791 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/772f9cbd-1f86-4502-b501-bb781c0f11fe-plugin-serving-cert podName:772f9cbd-1f86-4502-b501-bb781c0f11fe nodeName:}" failed. No retries permitted until 2025-11-25 09:51:28.263774557 +0000 UTC m=+894.116767943 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/772f9cbd-1f86-4502-b501-bb781c0f11fe-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-2tmw9" (UID: "772f9cbd-1f86-4502-b501-bb781c0f11fe") : secret "plugin-serving-cert" not found Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.782645 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glbrf\" (UniqueName: \"kubernetes.io/projected/772f9cbd-1f86-4502-b501-bb781c0f11fe-kube-api-access-glbrf\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.830489 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-765ccc5d45-ngmp2"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.831660 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.856767 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-765ccc5d45-ngmp2"] Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.967658 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-serving-cert\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.967752 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtgff\" (UniqueName: \"kubernetes.io/projected/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-kube-api-access-mtgff\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.967780 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-service-ca\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.967866 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-trusted-ca-bundle\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.967901 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-config\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.967938 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-oauth-serving-cert\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:27 crc kubenswrapper[4854]: I1125 09:51:27.967980 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-oauth-config\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.076109 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-config\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc 
kubenswrapper[4854]: I1125 09:51:28.076170 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-oauth-serving-cert\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.076234 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-oauth-config\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.076292 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.076316 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-serving-cert\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.076343 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtgff\" (UniqueName: \"kubernetes.io/projected/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-kube-api-access-mtgff\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.076363 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-service-ca\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.076435 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-trusted-ca-bundle\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.077311 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-oauth-serving-cert\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: E1125 09:51:28.077423 4854 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 25 09:51:28 crc kubenswrapper[4854]: E1125 09:51:28.077492 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair 
podName:f1e621b9-4a85-435a-af8b-2e17f4e74bef nodeName:}" failed. No retries permitted until 2025-11-25 09:51:29.077474582 +0000 UTC m=+894.930468018 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair") pod "nmstate-webhook-6b89b748d8-8hkpp" (UID: "f1e621b9-4a85-435a-af8b-2e17f4e74bef") : secret "openshift-nmstate-webhook" not found Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.077607 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-service-ca\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.077628 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-config\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.077834 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-trusted-ca-bundle\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.081976 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-serving-cert\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.082059 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-oauth-config\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.097378 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtgff\" (UniqueName: \"kubernetes.io/projected/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-kube-api-access-mtgff\") pod \"console-765ccc5d45-ngmp2\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") " pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.185774 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.232259 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf"] Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.279962 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/772f9cbd-1f86-4502-b501-bb781c0f11fe-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.284203 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/772f9cbd-1f86-4502-b501-bb781c0f11fe-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2tmw9\" (UID: \"772f9cbd-1f86-4502-b501-bb781c0f11fe\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.300098 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" event={"ID":"459bf834-bbae-4e88-812d-0f7f4f2560f5","Type":"ContainerStarted","Data":"825ca223fc498093cb5ee1848dd2bdf680151da8dc6c67c154fba8afd764f7da"} Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.309394 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-xq8dt" event={"ID":"22559aba-4245-4836-9ea5-9edad39725c5","Type":"ContainerStarted","Data":"b83f9a50c779f845791a4558f9dca81b7844f60216bdc67b2bfcbe5447e17ca1"} Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.480288 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.618914 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-765ccc5d45-ngmp2"] Nov 25 09:51:28 crc kubenswrapper[4854]: I1125 09:51:28.912140 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9"] Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.107803 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.113236 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f1e621b9-4a85-435a-af8b-2e17f4e74bef-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8hkpp\" (UID: \"f1e621b9-4a85-435a-af8b-2e17f4e74bef\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.193201 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.317166 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" event={"ID":"772f9cbd-1f86-4502-b501-bb781c0f11fe","Type":"ContainerStarted","Data":"c9188a0d0fb539f426018dcafdd57c54ad1603073ca17713e4bdcde5c553126f"} Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.318850 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-765ccc5d45-ngmp2" event={"ID":"8f308b0a-c5d0-446b-8489-7a8fcdaac38f","Type":"ContainerStarted","Data":"3021beef4c4019245c3d2c23497aaf5ab80e22e1596f0096a38f3daae0567676"} Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.318880 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-765ccc5d45-ngmp2" event={"ID":"8f308b0a-c5d0-446b-8489-7a8fcdaac38f","Type":"ContainerStarted","Data":"8aad63e6476398bc868af80c7b2f0eff7e3d3ad4e66f216053f192f4d9e6ad22"} Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.339453 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-765ccc5d45-ngmp2" podStartSLOduration=2.339431552 podStartE2EDuration="2.339431552s" podCreationTimestamp="2025-11-25 09:51:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:51:29.334908547 +0000 UTC m=+895.187901933" watchObservedRunningTime="2025-11-25 09:51:29.339431552 +0000 UTC m=+895.192424928" Nov 25 09:51:29 crc kubenswrapper[4854]: I1125 09:51:29.637087 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp"] Nov 25 09:51:30 crc kubenswrapper[4854]: W1125 09:51:30.349945 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1e621b9_4a85_435a_af8b_2e17f4e74bef.slice/crio-532cc9e9ed80836b0350f84befb78730056802554a154ec2046807171fd1a7d3 WatchSource:0}: Error finding container 532cc9e9ed80836b0350f84befb78730056802554a154ec2046807171fd1a7d3: Status 404 returned error can't find the container with id 532cc9e9ed80836b0350f84befb78730056802554a154ec2046807171fd1a7d3 Nov 25 09:51:31 crc kubenswrapper[4854]: I1125 09:51:31.340897 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" event={"ID":"f1e621b9-4a85-435a-af8b-2e17f4e74bef","Type":"ContainerStarted","Data":"532cc9e9ed80836b0350f84befb78730056802554a154ec2046807171fd1a7d3"} Nov 25 09:51:31 crc kubenswrapper[4854]: I1125 09:51:31.344216 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" event={"ID":"459bf834-bbae-4e88-812d-0f7f4f2560f5","Type":"ContainerStarted","Data":"de4333f02ddc39676837d9393ce736f21fafeba363c7abf91db8fd99c1df105c"} Nov 25 09:51:31 crc kubenswrapper[4854]: I1125 09:51:31.348177 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-xq8dt" event={"ID":"22559aba-4245-4836-9ea5-9edad39725c5","Type":"ContainerStarted","Data":"4f8575b6a9a42fb987d2e41a201a21cee3ab3de70db9ff9a611464ea96268bb7"} Nov 25 09:51:31 crc kubenswrapper[4854]: I1125 09:51:31.348362 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:31 crc kubenswrapper[4854]: I1125 09:51:31.368813 
4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-xq8dt" podStartSLOduration=1.7765524849999998 podStartE2EDuration="4.368796401s" podCreationTimestamp="2025-11-25 09:51:27 +0000 UTC" firstStartedPulling="2025-11-25 09:51:27.837005661 +0000 UTC m=+893.689999027" lastFinishedPulling="2025-11-25 09:51:30.429249567 +0000 UTC m=+896.282242943" observedRunningTime="2025-11-25 09:51:31.367131405 +0000 UTC m=+897.220124801" watchObservedRunningTime="2025-11-25 09:51:31.368796401 +0000 UTC m=+897.221789777" Nov 25 09:51:32 crc kubenswrapper[4854]: I1125 09:51:32.360893 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" event={"ID":"f1e621b9-4a85-435a-af8b-2e17f4e74bef","Type":"ContainerStarted","Data":"1bd0a6ef07be885204709710c86272b90af80926f6162d7eaa137077c0b628fa"} Nov 25 09:51:32 crc kubenswrapper[4854]: I1125 09:51:32.361294 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:51:32 crc kubenswrapper[4854]: I1125 09:51:32.363997 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" event={"ID":"772f9cbd-1f86-4502-b501-bb781c0f11fe","Type":"ContainerStarted","Data":"2dc89b2be4dfe5c99749dbc3aa0e6a088b3eabdc5cdd7348dbc4e9af78b5b1f8"} Nov 25 09:51:32 crc kubenswrapper[4854]: I1125 09:51:32.387982 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" podStartSLOduration=4.332223375 podStartE2EDuration="5.387950993s" podCreationTimestamp="2025-11-25 09:51:27 +0000 UTC" firstStartedPulling="2025-11-25 09:51:30.357087103 +0000 UTC m=+896.210080479" lastFinishedPulling="2025-11-25 09:51:31.412814721 +0000 UTC m=+897.265808097" observedRunningTime="2025-11-25 09:51:32.379547262 +0000 UTC m=+898.232540638" watchObservedRunningTime="2025-11-25 09:51:32.387950993 +0000 UTC m=+898.240944369" Nov 25 09:51:32 crc kubenswrapper[4854]: I1125 09:51:32.414908 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2tmw9" podStartSLOduration=2.93219716 podStartE2EDuration="5.414884214s" podCreationTimestamp="2025-11-25 09:51:27 +0000 UTC" firstStartedPulling="2025-11-25 09:51:28.927824784 +0000 UTC m=+894.780818170" lastFinishedPulling="2025-11-25 09:51:31.410511848 +0000 UTC m=+897.263505224" observedRunningTime="2025-11-25 09:51:32.412967881 +0000 UTC m=+898.265961277" watchObservedRunningTime="2025-11-25 09:51:32.414884214 +0000 UTC m=+898.267877590" Nov 25 09:51:33 crc kubenswrapper[4854]: I1125 09:51:33.376372 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" event={"ID":"459bf834-bbae-4e88-812d-0f7f4f2560f5","Type":"ContainerStarted","Data":"e4da68704b2db85f484dd50a02974ea34145bcb84f6bc9cae10b2189ba4a753e"} Nov 25 09:51:33 crc kubenswrapper[4854]: I1125 09:51:33.393740 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8xhgf" podStartSLOduration=1.439642 podStartE2EDuration="6.393721378s" podCreationTimestamp="2025-11-25 09:51:27 +0000 UTC" firstStartedPulling="2025-11-25 09:51:28.245379489 +0000 UTC m=+894.098372865" lastFinishedPulling="2025-11-25 09:51:33.199458867 +0000 UTC m=+899.052452243" observedRunningTime="2025-11-25 09:51:33.391558678 +0000 UTC 
m=+899.244552064" watchObservedRunningTime="2025-11-25 09:51:33.393721378 +0000 UTC m=+899.246714754" Nov 25 09:51:37 crc kubenswrapper[4854]: I1125 09:51:37.750103 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-xq8dt" Nov 25 09:51:38 crc kubenswrapper[4854]: I1125 09:51:38.186153 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:38 crc kubenswrapper[4854]: I1125 09:51:38.186200 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:38 crc kubenswrapper[4854]: I1125 09:51:38.190750 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:38 crc kubenswrapper[4854]: I1125 09:51:38.421294 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-765ccc5d45-ngmp2" Nov 25 09:51:38 crc kubenswrapper[4854]: I1125 09:51:38.478868 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7dcb9bcd7d-srrsb"] Nov 25 09:51:49 crc kubenswrapper[4854]: I1125 09:51:49.203360 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8hkpp" Nov 25 09:52:03 crc kubenswrapper[4854]: I1125 09:52:03.527742 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-7dcb9bcd7d-srrsb" podUID="2e2bb709-f690-49a8-85e2-c559a83da899" containerName="console" containerID="cri-o://a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675" gracePeriod=15 Nov 25 09:52:03 crc kubenswrapper[4854]: I1125 09:52:03.937925 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7dcb9bcd7d-srrsb_2e2bb709-f690-49a8-85e2-c559a83da899/console/0.log" Nov 25 09:52:03 crc kubenswrapper[4854]: I1125 09:52:03.938323 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7dcb9bcd7d-srrsb" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043218 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-console-config\") pod \"2e2bb709-f690-49a8-85e2-c559a83da899\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043274 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-service-ca\") pod \"2e2bb709-f690-49a8-85e2-c559a83da899\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043291 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-trusted-ca-bundle\") pod \"2e2bb709-f690-49a8-85e2-c559a83da899\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043758 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-serving-cert\") pod \"2e2bb709-f690-49a8-85e2-c559a83da899\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043807 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-oauth-config\") pod \"2e2bb709-f690-49a8-85e2-c559a83da899\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043860 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-oauth-serving-cert\") pod \"2e2bb709-f690-49a8-85e2-c559a83da899\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043895 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9kb7\" (UniqueName: \"kubernetes.io/projected/2e2bb709-f690-49a8-85e2-c559a83da899-kube-api-access-d9kb7\") pod \"2e2bb709-f690-49a8-85e2-c559a83da899\" (UID: \"2e2bb709-f690-49a8-85e2-c559a83da899\") " Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043974 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "2e2bb709-f690-49a8-85e2-c559a83da899" (UID: "2e2bb709-f690-49a8-85e2-c559a83da899"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.043990 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-service-ca" (OuterVolumeSpecName: "service-ca") pod "2e2bb709-f690-49a8-85e2-c559a83da899" (UID: "2e2bb709-f690-49a8-85e2-c559a83da899"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.044097 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-console-config" (OuterVolumeSpecName: "console-config") pod "2e2bb709-f690-49a8-85e2-c559a83da899" (UID: "2e2bb709-f690-49a8-85e2-c559a83da899"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.044614 4854 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.044635 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.044644 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.045189 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "2e2bb709-f690-49a8-85e2-c559a83da899" (UID: "2e2bb709-f690-49a8-85e2-c559a83da899"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.048962 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "2e2bb709-f690-49a8-85e2-c559a83da899" (UID: "2e2bb709-f690-49a8-85e2-c559a83da899"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.049149 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "2e2bb709-f690-49a8-85e2-c559a83da899" (UID: "2e2bb709-f690-49a8-85e2-c559a83da899"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.049272 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e2bb709-f690-49a8-85e2-c559a83da899-kube-api-access-d9kb7" (OuterVolumeSpecName: "kube-api-access-d9kb7") pod "2e2bb709-f690-49a8-85e2-c559a83da899" (UID: "2e2bb709-f690-49a8-85e2-c559a83da899"). InnerVolumeSpecName "kube-api-access-d9kb7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.148026 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9kb7\" (UniqueName: \"kubernetes.io/projected/2e2bb709-f690-49a8-85e2-c559a83da899-kube-api-access-d9kb7\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.148259 4854 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.148360 4854 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e2bb709-f690-49a8-85e2-c559a83da899-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.148439 4854 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e2bb709-f690-49a8-85e2-c559a83da899-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.613540 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7dcb9bcd7d-srrsb_2e2bb709-f690-49a8-85e2-c559a83da899/console/0.log" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.613871 4854 generic.go:334] "Generic (PLEG): container finished" podID="2e2bb709-f690-49a8-85e2-c559a83da899" containerID="a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675" exitCode=2 Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.613910 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dcb9bcd7d-srrsb" event={"ID":"2e2bb709-f690-49a8-85e2-c559a83da899","Type":"ContainerDied","Data":"a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675"} Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.613947 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dcb9bcd7d-srrsb" event={"ID":"2e2bb709-f690-49a8-85e2-c559a83da899","Type":"ContainerDied","Data":"b43cd079dd315e59cb7429d0e313e723cf65dc3c1764d58448851abbbe94b621"} Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.613964 4854 scope.go:117] "RemoveContainer" containerID="a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675" Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.613999 4854 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.636951 4854 scope.go:117] "RemoveContainer" containerID="a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675"
Nov 25 09:52:04 crc kubenswrapper[4854]: E1125 09:52:04.637402 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675\": container with ID starting with a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675 not found: ID does not exist" containerID="a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675"
Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.637440 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675"} err="failed to get container status \"a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675\": rpc error: code = NotFound desc = could not find container \"a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675\": container with ID starting with a2493fcb1250ca643dd57b92e8782027660de4498e70edfeeba41b8f1d0f1675 not found: ID does not exist"
Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.644556 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7dcb9bcd7d-srrsb"]
Nov 25 09:52:04 crc kubenswrapper[4854]: I1125 09:52:04.650976 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-7dcb9bcd7d-srrsb"]
Nov 25 09:52:05 crc kubenswrapper[4854]: I1125 09:52:05.023039 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e2bb709-f690-49a8-85e2-c559a83da899" path="/var/lib/kubelet/pods/2e2bb709-f690-49a8-85e2-c559a83da899/volumes"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.157844 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"]
Nov 25 09:52:06 crc kubenswrapper[4854]: E1125 09:52:06.158420 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e2bb709-f690-49a8-85e2-c559a83da899" containerName="console"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.158453 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e2bb709-f690-49a8-85e2-c559a83da899" containerName="console"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.158586 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e2bb709-f690-49a8-85e2-c559a83da899" containerName="console"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.159645 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.161892 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.173066 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"]
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.282779 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2kgb\" (UniqueName: \"kubernetes.io/projected/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-kube-api-access-n2kgb\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.282897 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.282935 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.384695 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2kgb\" (UniqueName: \"kubernetes.io/projected/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-kube-api-access-n2kgb\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.384819 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.384853 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.385396 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"
\"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.385424 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.404955 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2kgb\" (UniqueName: \"kubernetes.io/projected/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-kube-api-access-n2kgb\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.482180 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" Nov 25 09:52:06 crc kubenswrapper[4854]: I1125 09:52:06.960858 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57"] Nov 25 09:52:06 crc kubenswrapper[4854]: W1125 09:52:06.970402 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53ec1a11_25b3_41de_bde3_7c4d63dc5ce5.slice/crio-5eae6edb6e3d6f023cd758eaa6d21a65ab2fde3cf0f1f8cc256fdeadeb5e5ac5 WatchSource:0}: Error finding container 5eae6edb6e3d6f023cd758eaa6d21a65ab2fde3cf0f1f8cc256fdeadeb5e5ac5: Status 404 returned error can't find the container with id 5eae6edb6e3d6f023cd758eaa6d21a65ab2fde3cf0f1f8cc256fdeadeb5e5ac5 Nov 25 09:52:07 crc kubenswrapper[4854]: I1125 09:52:07.649191 4854 generic.go:334] "Generic (PLEG): container finished" podID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerID="46b2b18ead318f92c38dacb1506cdca5f7d5a773e5883c8f3d5e42c8f7cdbb74" exitCode=0 Nov 25 09:52:07 crc kubenswrapper[4854]: I1125 09:52:07.649264 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" event={"ID":"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5","Type":"ContainerDied","Data":"46b2b18ead318f92c38dacb1506cdca5f7d5a773e5883c8f3d5e42c8f7cdbb74"} Nov 25 09:52:07 crc kubenswrapper[4854]: I1125 09:52:07.649571 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" event={"ID":"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5","Type":"ContainerStarted","Data":"5eae6edb6e3d6f023cd758eaa6d21a65ab2fde3cf0f1f8cc256fdeadeb5e5ac5"} Nov 25 09:52:07 crc kubenswrapper[4854]: I1125 09:52:07.650740 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:52:09 crc kubenswrapper[4854]: I1125 09:52:09.665643 4854 generic.go:334] "Generic (PLEG): container finished" podID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" 
containerID="43dae2cdd6d865f812784dff569aa62f392f270ffb3a62ba6542c8e56a332920" exitCode=0 Nov 25 09:52:09 crc kubenswrapper[4854]: I1125 09:52:09.666012 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" event={"ID":"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5","Type":"ContainerDied","Data":"43dae2cdd6d865f812784dff569aa62f392f270ffb3a62ba6542c8e56a332920"} Nov 25 09:52:10 crc kubenswrapper[4854]: I1125 09:52:10.674927 4854 generic.go:334] "Generic (PLEG): container finished" podID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerID="b8a00f1a20aee358a6b69248d62c43022d09d5ccf3094773d60ddc1f24167492" exitCode=0 Nov 25 09:52:10 crc kubenswrapper[4854]: I1125 09:52:10.674984 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" event={"ID":"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5","Type":"ContainerDied","Data":"b8a00f1a20aee358a6b69248d62c43022d09d5ccf3094773d60ddc1f24167492"} Nov 25 09:52:11 crc kubenswrapper[4854]: I1125 09:52:11.971814 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" Nov 25 09:52:11 crc kubenswrapper[4854]: I1125 09:52:11.997918 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-bundle\") pod \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " Nov 25 09:52:11 crc kubenswrapper[4854]: I1125 09:52:11.998013 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2kgb\" (UniqueName: \"kubernetes.io/projected/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-kube-api-access-n2kgb\") pod \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:11.998119 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-util\") pod \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\" (UID: \"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5\") " Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.004648 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-bundle" (OuterVolumeSpecName: "bundle") pod "53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" (UID: "53ec1a11-25b3-41de-bde3-7c4d63dc5ce5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.010871 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-kube-api-access-n2kgb" (OuterVolumeSpecName: "kube-api-access-n2kgb") pod "53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" (UID: "53ec1a11-25b3-41de-bde3-7c4d63dc5ce5"). InnerVolumeSpecName "kube-api-access-n2kgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.012248 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-util" (OuterVolumeSpecName: "util") pod "53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" (UID: "53ec1a11-25b3-41de-bde3-7c4d63dc5ce5"). 
InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.106090 4854 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.106154 4854 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.106308 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2kgb\" (UniqueName: \"kubernetes.io/projected/53ec1a11-25b3-41de-bde3-7c4d63dc5ce5-kube-api-access-n2kgb\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.691577 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" event={"ID":"53ec1a11-25b3-41de-bde3-7c4d63dc5ce5","Type":"ContainerDied","Data":"5eae6edb6e3d6f023cd758eaa6d21a65ab2fde3cf0f1f8cc256fdeadeb5e5ac5"} Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.691616 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5eae6edb6e3d6f023cd758eaa6d21a65ab2fde3cf0f1f8cc256fdeadeb5e5ac5" Nov 25 09:52:12 crc kubenswrapper[4854]: I1125 09:52:12.691716 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57" Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.908248 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"] Nov 25 09:52:19 crc kubenswrapper[4854]: E1125 09:52:19.909127 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerName="util" Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.909143 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerName="util" Nov 25 09:52:19 crc kubenswrapper[4854]: E1125 09:52:19.909158 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerName="pull" Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.909166 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerName="pull" Nov 25 09:52:19 crc kubenswrapper[4854]: E1125 09:52:19.909179 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerName="extract" Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.909186 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerName="extract" Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.909338 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="53ec1a11-25b3-41de-bde3-7c4d63dc5ce5" containerName="extract" Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.909960 4854 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.911701 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-jbhdv"
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.912232 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.913418 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.913418 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.914416 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.926203 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"]
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.942178 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-apiservice-cert\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.942277 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9pbv\" (UniqueName: \"kubernetes.io/projected/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-kube-api-access-c9pbv\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:19 crc kubenswrapper[4854]: I1125 09:52:19.942300 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-webhook-cert\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.043369 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-apiservice-cert\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.043497 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9pbv\" (UniqueName: \"kubernetes.io/projected/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-kube-api-access-c9pbv\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.043518 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-webhook-cert\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.049603 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-apiservice-cert\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.051155 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-webhook-cert\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.067599 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9pbv\" (UniqueName: \"kubernetes.io/projected/493a8b3d-2aca-4cc2-a2f1-158ed88ca234-kube-api-access-c9pbv\") pod \"metallb-operator-controller-manager-687945d4d9-n2f6z\" (UID: \"493a8b3d-2aca-4cc2-a2f1-158ed88ca234\") " pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.228617 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.411407 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"]
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.414790 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.420504 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.420735 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-d2m8b"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.440765 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.442114 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"]
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.464376 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ed8237ae-873c-431e-9283-3a5a4dc63333-webhook-cert\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.464506 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hcmw\" (UniqueName: \"kubernetes.io/projected/ed8237ae-873c-431e-9283-3a5a4dc63333-kube-api-access-8hcmw\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.464560 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ed8237ae-873c-431e-9283-3a5a4dc63333-apiservice-cert\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.566238 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hcmw\" (UniqueName: \"kubernetes.io/projected/ed8237ae-873c-431e-9283-3a5a4dc63333-kube-api-access-8hcmw\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.566355 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ed8237ae-873c-431e-9283-3a5a4dc63333-apiservice-cert\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.566388 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ed8237ae-873c-431e-9283-3a5a4dc63333-webhook-cert\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.574957 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ed8237ae-873c-431e-9283-3a5a4dc63333-apiservice-cert\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.577211 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ed8237ae-873c-431e-9283-3a5a4dc63333-webhook-cert\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.589847 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hcmw\" (UniqueName: \"kubernetes.io/projected/ed8237ae-873c-431e-9283-3a5a4dc63333-kube-api-access-8hcmw\") pod \"metallb-operator-webhook-server-fb9df45c-vx6gh\" (UID: \"ed8237ae-873c-431e-9283-3a5a4dc63333\") " pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.772526 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"]
Nov 25 09:52:20 crc kubenswrapper[4854]: W1125 09:52:20.781312 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod493a8b3d_2aca_4cc2_a2f1_158ed88ca234.slice/crio-ace2b22782df7b2958617eecadf23c63784498055fc66731ea819389c61c0740 WatchSource:0}: Error finding container ace2b22782df7b2958617eecadf23c63784498055fc66731ea819389c61c0740: Status 404 returned error can't find the container with id ace2b22782df7b2958617eecadf23c63784498055fc66731ea819389c61c0740
Nov 25 09:52:20 crc kubenswrapper[4854]: I1125 09:52:20.789983 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:21 crc kubenswrapper[4854]: W1125 09:52:21.233359 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded8237ae_873c_431e_9283_3a5a4dc63333.slice/crio-f8566e3baa26bf2c391ab87ab701fa44b505f83ad1cfd88177537878a1efc859 WatchSource:0}: Error finding container f8566e3baa26bf2c391ab87ab701fa44b505f83ad1cfd88177537878a1efc859: Status 404 returned error can't find the container with id f8566e3baa26bf2c391ab87ab701fa44b505f83ad1cfd88177537878a1efc859
Nov 25 09:52:21 crc kubenswrapper[4854]: I1125 09:52:21.233931 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"]
Nov 25 09:52:21 crc kubenswrapper[4854]: I1125 09:52:21.779573 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh" event={"ID":"ed8237ae-873c-431e-9283-3a5a4dc63333","Type":"ContainerStarted","Data":"f8566e3baa26bf2c391ab87ab701fa44b505f83ad1cfd88177537878a1efc859"}
Nov 25 09:52:21 crc kubenswrapper[4854]: I1125 09:52:21.781719 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z" event={"ID":"493a8b3d-2aca-4cc2-a2f1-158ed88ca234","Type":"ContainerStarted","Data":"ace2b22782df7b2958617eecadf23c63784498055fc66731ea819389c61c0740"}
Nov 25 09:52:25 crc kubenswrapper[4854]: I1125 09:52:25.029353 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:52:25 crc kubenswrapper[4854]: I1125 09:52:25.029651 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:52:26 crc kubenswrapper[4854]: I1125 09:52:26.835623 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh" event={"ID":"ed8237ae-873c-431e-9283-3a5a4dc63333","Type":"ContainerStarted","Data":"681f138019bb526804ee128107fc3a5c01b51940a256ec996b028d8f008e01de"}
Nov 25 09:52:26 crc kubenswrapper[4854]: I1125 09:52:26.836223 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:26 crc kubenswrapper[4854]: I1125 09:52:26.838785 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z" event={"ID":"493a8b3d-2aca-4cc2-a2f1-158ed88ca234","Type":"ContainerStarted","Data":"00784233dd6825369d5f5f772f6cda6c42dc89f2ecc6cdbf0b63d1f2c486addd"}
Nov 25 09:52:26 crc kubenswrapper[4854]: I1125 09:52:26.838916 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:52:26 crc kubenswrapper[4854]: I1125 09:52:26.857470 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh" podStartSLOduration=2.112962595 podStartE2EDuration="6.857453249s" podCreationTimestamp="2025-11-25 09:52:20 +0000 UTC" firstStartedPulling="2025-11-25 09:52:21.237526183 +0000 UTC m=+947.090519559" lastFinishedPulling="2025-11-25 09:52:25.982016827 +0000 UTC m=+951.835010213" observedRunningTime="2025-11-25 09:52:26.856343758 +0000 UTC m=+952.709337134" watchObservedRunningTime="2025-11-25 09:52:26.857453249 +0000 UTC m=+952.710446625"
Nov 25 09:52:26 crc kubenswrapper[4854]: I1125 09:52:26.876059 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z" podStartSLOduration=2.697745517 podStartE2EDuration="7.876036289s" podCreationTimestamp="2025-11-25 09:52:19 +0000 UTC" firstStartedPulling="2025-11-25 09:52:20.783789377 +0000 UTC m=+946.636782753" lastFinishedPulling="2025-11-25 09:52:25.962080149 +0000 UTC m=+951.815073525" observedRunningTime="2025-11-25 09:52:26.874533098 +0000 UTC m=+952.727526494" watchObservedRunningTime="2025-11-25 09:52:26.876036289 +0000 UTC m=+952.729029665"
Nov 25 09:52:40 crc kubenswrapper[4854]: I1125 09:52:40.796145 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-fb9df45c-vx6gh"
Nov 25 09:52:55 crc kubenswrapper[4854]: I1125 09:52:55.029054 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:52:55 crc kubenswrapper[4854]: I1125 09:52:55.029634 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.233435 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-687945d4d9-n2f6z"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.966716 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-f9q69"]
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.970803 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.973756 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.973956 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.974118 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-pqn99"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.974978 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-52mph"]
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.976063 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.981307 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 25 09:53:00 crc kubenswrapper[4854]: I1125 09:53:00.989326 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-52mph"]
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.067059 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-zlgw9"]
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.072656 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-zlgw9"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.075608 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-p24tg"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.075964 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.076161 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.076420 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.106218 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-dx66b"]
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.107627 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-dx66b"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.110386 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119135 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-reloader\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119263 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2m65\" (UniqueName: \"kubernetes.io/projected/a3c67adc-f296-4e37-a023-1c478d8abcd7-kube-api-access-j2m65\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119288 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-sockets\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119334 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-conf\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119364 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-metrics\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119429 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/570de8a9-89ac-41e7-973c-d76485b7f41d-cert\") pod \"frr-k8s-webhook-server-6998585d5-52mph\" (UID: \"570de8a9-89ac-41e7-973c-d76485b7f41d\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119490 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmfps\" (UniqueName: \"kubernetes.io/projected/570de8a9-89ac-41e7-973c-d76485b7f41d-kube-api-access-cmfps\") pod \"frr-k8s-webhook-server-6998585d5-52mph\" (UID: \"570de8a9-89ac-41e7-973c-d76485b7f41d\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119522 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a3c67adc-f296-4e37-a023-1c478d8abcd7-metrics-certs\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.119547 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-startup\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.140906 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-dx66b"]
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220345 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/570de8a9-89ac-41e7-973c-d76485b7f41d-cert\") pod \"frr-k8s-webhook-server-6998585d5-52mph\" (UID: \"570de8a9-89ac-41e7-973c-d76485b7f41d\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220397 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fxf9\" (UniqueName: \"kubernetes.io/projected/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-kube-api-access-2fxf9\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220419 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-metrics-certs\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9"
Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220452 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmfps\" (UniqueName: \"kubernetes.io/projected/570de8a9-89ac-41e7-973c-d76485b7f41d-kube-api-access-cmfps\") pod \"frr-k8s-webhook-server-6998585d5-52mph\" (UID: \"570de8a9-89ac-41e7-973c-d76485b7f41d\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph"
\"frr-k8s-webhook-server-6998585d5-52mph\" (UID: \"570de8a9-89ac-41e7-973c-d76485b7f41d\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220472 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-cert\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220490 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a3c67adc-f296-4e37-a023-1c478d8abcd7-metrics-certs\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220508 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-startup\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220540 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-metallb-excludel2\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220564 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-reloader\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220596 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrzdd\" (UniqueName: \"kubernetes.io/projected/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-kube-api-access-jrzdd\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220627 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220658 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-metrics-certs\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220692 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-sockets\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " 
pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220726 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2m65\" (UniqueName: \"kubernetes.io/projected/a3c67adc-f296-4e37-a023-1c478d8abcd7-kube-api-access-j2m65\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220756 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-conf\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.220821 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-metrics\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.221218 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-metrics\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.221584 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-sockets\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.221963 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-reloader\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.222162 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-conf\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.222450 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a3c67adc-f296-4e37-a023-1c478d8abcd7-frr-startup\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.231657 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/570de8a9-89ac-41e7-973c-d76485b7f41d-cert\") pod \"frr-k8s-webhook-server-6998585d5-52mph\" (UID: \"570de8a9-89ac-41e7-973c-d76485b7f41d\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.245895 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmfps\" (UniqueName: \"kubernetes.io/projected/570de8a9-89ac-41e7-973c-d76485b7f41d-kube-api-access-cmfps\") pod \"frr-k8s-webhook-server-6998585d5-52mph\" (UID: 
\"570de8a9-89ac-41e7-973c-d76485b7f41d\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.246834 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2m65\" (UniqueName: \"kubernetes.io/projected/a3c67adc-f296-4e37-a023-1c478d8abcd7-kube-api-access-j2m65\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.268820 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a3c67adc-f296-4e37-a023-1c478d8abcd7-metrics-certs\") pod \"frr-k8s-f9q69\" (UID: \"a3c67adc-f296-4e37-a023-1c478d8abcd7\") " pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.296592 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.323446 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fxf9\" (UniqueName: \"kubernetes.io/projected/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-kube-api-access-2fxf9\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.323652 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-metrics-certs\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.323710 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-cert\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.323763 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-metallb-excludel2\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.323797 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrzdd\" (UniqueName: \"kubernetes.io/projected/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-kube-api-access-jrzdd\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.323827 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.323866 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-metrics-certs\") pod \"controller-6c7b4b5f48-dx66b\" (UID: 
\"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: E1125 09:53:01.324356 4854 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 09:53:01 crc kubenswrapper[4854]: E1125 09:53:01.324435 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist podName:886977c7-abf1-4a92-8d3f-3c14fc0ea0f1 nodeName:}" failed. No retries permitted until 2025-11-25 09:53:01.824415737 +0000 UTC m=+987.677409113 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist") pod "speaker-zlgw9" (UID: "886977c7-abf1-4a92-8d3f-3c14fc0ea0f1") : secret "metallb-memberlist" not found Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.324783 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-metallb-excludel2\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.326112 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.329111 4854 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.329347 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-metrics-certs\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.329408 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-metrics-certs\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.340960 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-cert\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.343659 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrzdd\" (UniqueName: \"kubernetes.io/projected/52bdf1ca-71ae-4bb7-a4e9-593da3d912cf-kube-api-access-jrzdd\") pod \"controller-6c7b4b5f48-dx66b\" (UID: \"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf\") " pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.345881 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fxf9\" (UniqueName: \"kubernetes.io/projected/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-kube-api-access-2fxf9\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.425890 4854 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.814841 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-52mph"] Nov 25 09:53:01 crc kubenswrapper[4854]: W1125 09:53:01.816665 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod570de8a9_89ac_41e7_973c_d76485b7f41d.slice/crio-456669079b9ed2ec38fcb8f9238fd59eaa782663b575bf213c32c91bc2a02583 WatchSource:0}: Error finding container 456669079b9ed2ec38fcb8f9238fd59eaa782663b575bf213c32c91bc2a02583: Status 404 returned error can't find the container with id 456669079b9ed2ec38fcb8f9238fd59eaa782663b575bf213c32c91bc2a02583 Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.837225 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:01 crc kubenswrapper[4854]: E1125 09:53:01.837382 4854 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 09:53:01 crc kubenswrapper[4854]: E1125 09:53:01.837443 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist podName:886977c7-abf1-4a92-8d3f-3c14fc0ea0f1 nodeName:}" failed. No retries permitted until 2025-11-25 09:53:02.837428401 +0000 UTC m=+988.690421777 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist") pod "speaker-zlgw9" (UID: "886977c7-abf1-4a92-8d3f-3c14fc0ea0f1") : secret "metallb-memberlist" not found Nov 25 09:53:01 crc kubenswrapper[4854]: I1125 09:53:01.915256 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-dx66b"] Nov 25 09:53:02 crc kubenswrapper[4854]: I1125 09:53:02.107803 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-dx66b" event={"ID":"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf","Type":"ContainerStarted","Data":"5811cd62849b7a11a627dea9b014946a56adcf66212053dbe34ab83106518c6e"} Nov 25 09:53:02 crc kubenswrapper[4854]: I1125 09:53:02.107854 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-dx66b" event={"ID":"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf","Type":"ContainerStarted","Data":"0055eed2ae476526edbb5f4a76347f4c2cfec0501691b0bb618a9f4802df13b1"} Nov 25 09:53:02 crc kubenswrapper[4854]: I1125 09:53:02.109475 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" event={"ID":"570de8a9-89ac-41e7-973c-d76485b7f41d","Type":"ContainerStarted","Data":"456669079b9ed2ec38fcb8f9238fd59eaa782663b575bf213c32c91bc2a02583"} Nov 25 09:53:02 crc kubenswrapper[4854]: I1125 09:53:02.110849 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerStarted","Data":"afbfda824e152a27427a3c474aaea5ac3ee729b6aef0ad4fd958cf8b9b63408a"} Nov 25 09:53:02 crc kubenswrapper[4854]: I1125 09:53:02.854846 4854 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:02 crc kubenswrapper[4854]: I1125 09:53:02.876315 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/886977c7-abf1-4a92-8d3f-3c14fc0ea0f1-memberlist\") pod \"speaker-zlgw9\" (UID: \"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1\") " pod="metallb-system/speaker-zlgw9" Nov 25 09:53:02 crc kubenswrapper[4854]: I1125 09:53:02.893122 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-zlgw9" Nov 25 09:53:02 crc kubenswrapper[4854]: W1125 09:53:02.924972 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod886977c7_abf1_4a92_8d3f_3c14fc0ea0f1.slice/crio-a3839eacd7e06a45947cd7cedab9a3165ffefc091d04ee0bf7086c9b843e7766 WatchSource:0}: Error finding container a3839eacd7e06a45947cd7cedab9a3165ffefc091d04ee0bf7086c9b843e7766: Status 404 returned error can't find the container with id a3839eacd7e06a45947cd7cedab9a3165ffefc091d04ee0bf7086c9b843e7766 Nov 25 09:53:03 crc kubenswrapper[4854]: I1125 09:53:03.125630 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-dx66b" event={"ID":"52bdf1ca-71ae-4bb7-a4e9-593da3d912cf","Type":"ContainerStarted","Data":"c194fdfdece6bb46ef9e8cc6ef4504a1de4a4ece587afcf67be80dc9ec9daadf"} Nov 25 09:53:03 crc kubenswrapper[4854]: I1125 09:53:03.125863 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:03 crc kubenswrapper[4854]: I1125 09:53:03.129850 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zlgw9" event={"ID":"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1","Type":"ContainerStarted","Data":"a3839eacd7e06a45947cd7cedab9a3165ffefc091d04ee0bf7086c9b843e7766"} Nov 25 09:53:03 crc kubenswrapper[4854]: I1125 09:53:03.162032 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-dx66b" podStartSLOduration=2.162016977 podStartE2EDuration="2.162016977s" podCreationTimestamp="2025-11-25 09:53:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:53:03.161401261 +0000 UTC m=+989.014394637" watchObservedRunningTime="2025-11-25 09:53:03.162016977 +0000 UTC m=+989.015010343" Nov 25 09:53:04 crc kubenswrapper[4854]: I1125 09:53:04.139275 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zlgw9" event={"ID":"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1","Type":"ContainerStarted","Data":"9925c352fc4baaf48467067a04ff21115c5e63122ba283996cd01feca1d20b8f"} Nov 25 09:53:04 crc kubenswrapper[4854]: I1125 09:53:04.139660 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zlgw9" event={"ID":"886977c7-abf1-4a92-8d3f-3c14fc0ea0f1","Type":"ContainerStarted","Data":"de36963a3b049bf4ed1483f61f050451a3f80f4a20adcb66c97da475aeae06ee"} Nov 25 09:53:04 crc kubenswrapper[4854]: I1125 09:53:04.139717 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-zlgw9" Nov 25 09:53:04 crc kubenswrapper[4854]: I1125 09:53:04.157146 4854 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="metallb-system/speaker-zlgw9" podStartSLOduration=3.157125637 podStartE2EDuration="3.157125637s" podCreationTimestamp="2025-11-25 09:53:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:53:04.152960672 +0000 UTC m=+990.005954058" watchObservedRunningTime="2025-11-25 09:53:04.157125637 +0000 UTC m=+990.010119003" Nov 25 09:53:11 crc kubenswrapper[4854]: I1125 09:53:11.205229 4854 generic.go:334] "Generic (PLEG): container finished" podID="a3c67adc-f296-4e37-a023-1c478d8abcd7" containerID="42097eefb584e846accd73ce94b3b9bd1e6bd2c83946dfcf81650039073580bf" exitCode=0 Nov 25 09:53:11 crc kubenswrapper[4854]: I1125 09:53:11.205476 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerDied","Data":"42097eefb584e846accd73ce94b3b9bd1e6bd2c83946dfcf81650039073580bf"} Nov 25 09:53:11 crc kubenswrapper[4854]: I1125 09:53:11.208593 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" event={"ID":"570de8a9-89ac-41e7-973c-d76485b7f41d","Type":"ContainerStarted","Data":"94d803f4924751adb1a8c2a7b6fd548d9df90a198980bdb76e4e3c04a90399d0"} Nov 25 09:53:11 crc kubenswrapper[4854]: I1125 09:53:11.208724 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" Nov 25 09:53:11 crc kubenswrapper[4854]: I1125 09:53:11.298911 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" podStartSLOduration=2.880206287 podStartE2EDuration="11.298889564s" podCreationTimestamp="2025-11-25 09:53:00 +0000 UTC" firstStartedPulling="2025-11-25 09:53:01.819309334 +0000 UTC m=+987.672302700" lastFinishedPulling="2025-11-25 09:53:10.237992591 +0000 UTC m=+996.090985977" observedRunningTime="2025-11-25 09:53:11.294781601 +0000 UTC m=+997.147774987" watchObservedRunningTime="2025-11-25 09:53:11.298889564 +0000 UTC m=+997.151882940" Nov 25 09:53:12 crc kubenswrapper[4854]: I1125 09:53:12.217892 4854 generic.go:334] "Generic (PLEG): container finished" podID="a3c67adc-f296-4e37-a023-1c478d8abcd7" containerID="296e94b7df0cdba3636f7b8c948babe78a3a00a07e028c743eec1c77d8f3833f" exitCode=0 Nov 25 09:53:12 crc kubenswrapper[4854]: I1125 09:53:12.218011 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerDied","Data":"296e94b7df0cdba3636f7b8c948babe78a3a00a07e028c743eec1c77d8f3833f"} Nov 25 09:53:13 crc kubenswrapper[4854]: I1125 09:53:13.240372 4854 generic.go:334] "Generic (PLEG): container finished" podID="a3c67adc-f296-4e37-a023-1c478d8abcd7" containerID="a5e9ddbbc385a38bef9fe572aa217c69abd04e87548df6063070ad9e05d57c3f" exitCode=0 Nov 25 09:53:13 crc kubenswrapper[4854]: I1125 09:53:13.240463 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerDied","Data":"a5e9ddbbc385a38bef9fe572aa217c69abd04e87548df6063070ad9e05d57c3f"} Nov 25 09:53:14 crc kubenswrapper[4854]: I1125 09:53:14.270619 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" 
event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerStarted","Data":"ceec55447449482fbd9a55eccad342a0a0c9fc22db140be08494398fe16d3c24"} Nov 25 09:53:14 crc kubenswrapper[4854]: I1125 09:53:14.270898 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerStarted","Data":"edf4dc5100afd1b17eab4a68b8c640a7f0d58e8033d200ffd96968e8b8389735"} Nov 25 09:53:14 crc kubenswrapper[4854]: I1125 09:53:14.270910 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerStarted","Data":"854cb0912fc44ffb733af4f51d39020fa327a04f6415b6a760a8f09818af27f3"} Nov 25 09:53:14 crc kubenswrapper[4854]: I1125 09:53:14.270918 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerStarted","Data":"27257e67a22682cdea9ab9bd1c23c5e99819e11042020dcd475c9093acd83375"} Nov 25 09:53:15 crc kubenswrapper[4854]: I1125 09:53:15.284584 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerStarted","Data":"5ff849aa6b3b01c2a889eaa23da53019ca5b80875d57afe7f67b44669a3621fe"} Nov 25 09:53:15 crc kubenswrapper[4854]: I1125 09:53:15.285045 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:15 crc kubenswrapper[4854]: I1125 09:53:15.285064 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-f9q69" event={"ID":"a3c67adc-f296-4e37-a023-1c478d8abcd7","Type":"ContainerStarted","Data":"84c5df3563b7f49c457ebe5f0655475284f67679169781bb309ff24a403a81aa"} Nov 25 09:53:15 crc kubenswrapper[4854]: I1125 09:53:15.305090 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-f9q69" podStartSLOduration=6.617157783 podStartE2EDuration="15.305071865s" podCreationTimestamp="2025-11-25 09:53:00 +0000 UTC" firstStartedPulling="2025-11-25 09:53:01.523658434 +0000 UTC m=+987.376651810" lastFinishedPulling="2025-11-25 09:53:10.211572516 +0000 UTC m=+996.064565892" observedRunningTime="2025-11-25 09:53:15.302411902 +0000 UTC m=+1001.155405278" watchObservedRunningTime="2025-11-25 09:53:15.305071865 +0000 UTC m=+1001.158065241" Nov 25 09:53:16 crc kubenswrapper[4854]: I1125 09:53:16.297930 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:16 crc kubenswrapper[4854]: I1125 09:53:16.340215 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:21 crc kubenswrapper[4854]: I1125 09:53:21.330738 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-52mph" Nov 25 09:53:21 crc kubenswrapper[4854]: I1125 09:53:21.431020 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-dx66b" Nov 25 09:53:22 crc kubenswrapper[4854]: I1125 09:53:22.896750 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-zlgw9" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.029197 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.029502 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.029545 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.030206 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5e14519da63f04cdbdb7f55713d2722df29a5332d12866e5327a4659d36c5bcf"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.030274 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://5e14519da63f04cdbdb7f55713d2722df29a5332d12866e5327a4659d36c5bcf" gracePeriod=600 Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.367198 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="5e14519da63f04cdbdb7f55713d2722df29a5332d12866e5327a4659d36c5bcf" exitCode=0 Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.367274 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"5e14519da63f04cdbdb7f55713d2722df29a5332d12866e5327a4659d36c5bcf"} Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.367939 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"8c88dce0ea083d0b4318356bc4c4cafd9ff804af077bca2201c157b710b82d4d"} Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.367988 4854 scope.go:117] "RemoveContainer" containerID="9b86f8830130949aa485656adb170193fb5c4c66ab6f65d45cd6ab7997ce2f21" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.984176 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-q4lh2"] Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.985442 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-q4lh2" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.990473 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.990578 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-pw2zr" Nov 25 09:53:25 crc kubenswrapper[4854]: I1125 09:53:25.999695 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-q4lh2"] Nov 25 09:53:26 crc kubenswrapper[4854]: I1125 09:53:26.000655 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 09:53:26 crc kubenswrapper[4854]: I1125 09:53:26.149548 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9tb9\" (UniqueName: \"kubernetes.io/projected/f869fd75-6e46-42ac-80e4-ffa238353fb9-kube-api-access-h9tb9\") pod \"openstack-operator-index-q4lh2\" (UID: \"f869fd75-6e46-42ac-80e4-ffa238353fb9\") " pod="openstack-operators/openstack-operator-index-q4lh2" Nov 25 09:53:26 crc kubenswrapper[4854]: I1125 09:53:26.251817 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9tb9\" (UniqueName: \"kubernetes.io/projected/f869fd75-6e46-42ac-80e4-ffa238353fb9-kube-api-access-h9tb9\") pod \"openstack-operator-index-q4lh2\" (UID: \"f869fd75-6e46-42ac-80e4-ffa238353fb9\") " pod="openstack-operators/openstack-operator-index-q4lh2" Nov 25 09:53:26 crc kubenswrapper[4854]: I1125 09:53:26.274735 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9tb9\" (UniqueName: \"kubernetes.io/projected/f869fd75-6e46-42ac-80e4-ffa238353fb9-kube-api-access-h9tb9\") pod \"openstack-operator-index-q4lh2\" (UID: \"f869fd75-6e46-42ac-80e4-ffa238353fb9\") " pod="openstack-operators/openstack-operator-index-q4lh2" Nov 25 09:53:26 crc kubenswrapper[4854]: I1125 09:53:26.360441 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-q4lh2" Nov 25 09:53:26 crc kubenswrapper[4854]: I1125 09:53:26.783305 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-q4lh2"] Nov 25 09:53:26 crc kubenswrapper[4854]: W1125 09:53:26.793175 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf869fd75_6e46_42ac_80e4_ffa238353fb9.slice/crio-25a34606f3c7237397d53c1cb5bfac5930bb416dd65eabde50bf564d0b8c857e WatchSource:0}: Error finding container 25a34606f3c7237397d53c1cb5bfac5930bb416dd65eabde50bf564d0b8c857e: Status 404 returned error can't find the container with id 25a34606f3c7237397d53c1cb5bfac5930bb416dd65eabde50bf564d0b8c857e Nov 25 09:53:27 crc kubenswrapper[4854]: I1125 09:53:27.383984 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q4lh2" event={"ID":"f869fd75-6e46-42ac-80e4-ffa238353fb9","Type":"ContainerStarted","Data":"25a34606f3c7237397d53c1cb5bfac5930bb416dd65eabde50bf564d0b8c857e"} Nov 25 09:53:29 crc kubenswrapper[4854]: I1125 09:53:29.363141 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-q4lh2"] Nov 25 09:53:29 crc kubenswrapper[4854]: I1125 09:53:29.977682 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-68sqj"] Nov 25 09:53:29 crc kubenswrapper[4854]: I1125 09:53:29.979196 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:29 crc kubenswrapper[4854]: I1125 09:53:29.986571 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-68sqj"] Nov 25 09:53:30 crc kubenswrapper[4854]: I1125 09:53:30.117713 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n4cv\" (UniqueName: \"kubernetes.io/projected/0aca6f91-76ff-47fe-957a-d03494fa8d99-kube-api-access-2n4cv\") pod \"openstack-operator-index-68sqj\" (UID: \"0aca6f91-76ff-47fe-957a-d03494fa8d99\") " pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:30 crc kubenswrapper[4854]: I1125 09:53:30.219748 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n4cv\" (UniqueName: \"kubernetes.io/projected/0aca6f91-76ff-47fe-957a-d03494fa8d99-kube-api-access-2n4cv\") pod \"openstack-operator-index-68sqj\" (UID: \"0aca6f91-76ff-47fe-957a-d03494fa8d99\") " pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:30 crc kubenswrapper[4854]: I1125 09:53:30.237418 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n4cv\" (UniqueName: \"kubernetes.io/projected/0aca6f91-76ff-47fe-957a-d03494fa8d99-kube-api-access-2n4cv\") pod \"openstack-operator-index-68sqj\" (UID: \"0aca6f91-76ff-47fe-957a-d03494fa8d99\") " pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:30 crc kubenswrapper[4854]: I1125 09:53:30.306461 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:31 crc kubenswrapper[4854]: I1125 09:53:31.303355 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-f9q69" Nov 25 09:53:32 crc kubenswrapper[4854]: I1125 09:53:32.397174 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-68sqj"] Nov 25 09:53:33 crc kubenswrapper[4854]: I1125 09:53:33.437524 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q4lh2" event={"ID":"f869fd75-6e46-42ac-80e4-ffa238353fb9","Type":"ContainerStarted","Data":"a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80"} Nov 25 09:53:33 crc kubenswrapper[4854]: I1125 09:53:33.439791 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-68sqj" event={"ID":"0aca6f91-76ff-47fe-957a-d03494fa8d99","Type":"ContainerStarted","Data":"f8da60a311241eec734cb2774440e67359c57225cba0f69868c24f519aa27278"} Nov 25 09:53:33 crc kubenswrapper[4854]: I1125 09:53:33.437664 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-q4lh2" podUID="f869fd75-6e46-42ac-80e4-ffa238353fb9" containerName="registry-server" containerID="cri-o://a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80" gracePeriod=2 Nov 25 09:53:33 crc kubenswrapper[4854]: I1125 09:53:33.439907 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-68sqj" event={"ID":"0aca6f91-76ff-47fe-957a-d03494fa8d99","Type":"ContainerStarted","Data":"29c45fe8ae9f83f75eab3d670b497724ae4dd1f672cf973b50bd19278a85e7f9"} Nov 25 09:53:33 crc kubenswrapper[4854]: I1125 09:53:33.461582 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-q4lh2" podStartSLOduration=2.484313498 podStartE2EDuration="8.461550399s" podCreationTimestamp="2025-11-25 09:53:25 +0000 UTC" firstStartedPulling="2025-11-25 09:53:26.795303266 +0000 UTC m=+1012.648296642" lastFinishedPulling="2025-11-25 09:53:32.772540157 +0000 UTC m=+1018.625533543" observedRunningTime="2025-11-25 09:53:33.459007699 +0000 UTC m=+1019.312001075" watchObservedRunningTime="2025-11-25 09:53:33.461550399 +0000 UTC m=+1019.314543805" Nov 25 09:53:33 crc kubenswrapper[4854]: I1125 09:53:33.477255 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-68sqj" podStartSLOduration=4.087546939 podStartE2EDuration="4.477230299s" podCreationTimestamp="2025-11-25 09:53:29 +0000 UTC" firstStartedPulling="2025-11-25 09:53:32.75585956 +0000 UTC m=+1018.608852926" lastFinishedPulling="2025-11-25 09:53:33.14554291 +0000 UTC m=+1018.998536286" observedRunningTime="2025-11-25 09:53:33.475000308 +0000 UTC m=+1019.327993674" watchObservedRunningTime="2025-11-25 09:53:33.477230299 +0000 UTC m=+1019.330223695" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.405247 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-q4lh2" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.464368 4854 generic.go:334] "Generic (PLEG): container finished" podID="f869fd75-6e46-42ac-80e4-ffa238353fb9" containerID="a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80" exitCode=0 Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.464433 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-q4lh2" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.464472 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q4lh2" event={"ID":"f869fd75-6e46-42ac-80e4-ffa238353fb9","Type":"ContainerDied","Data":"a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80"} Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.464517 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-q4lh2" event={"ID":"f869fd75-6e46-42ac-80e4-ffa238353fb9","Type":"ContainerDied","Data":"25a34606f3c7237397d53c1cb5bfac5930bb416dd65eabde50bf564d0b8c857e"} Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.464535 4854 scope.go:117] "RemoveContainer" containerID="a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.485022 4854 scope.go:117] "RemoveContainer" containerID="a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80" Nov 25 09:53:34 crc kubenswrapper[4854]: E1125 09:53:34.485562 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80\": container with ID starting with a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80 not found: ID does not exist" containerID="a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.485605 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80"} err="failed to get container status \"a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80\": rpc error: code = NotFound desc = could not find container \"a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80\": container with ID starting with a446f4ec698649006e067836f90edbab30efe857a42eebd8ca818aee24311f80 not found: ID does not exist" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.486497 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9tb9\" (UniqueName: \"kubernetes.io/projected/f869fd75-6e46-42ac-80e4-ffa238353fb9-kube-api-access-h9tb9\") pod \"f869fd75-6e46-42ac-80e4-ffa238353fb9\" (UID: \"f869fd75-6e46-42ac-80e4-ffa238353fb9\") " Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.492664 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f869fd75-6e46-42ac-80e4-ffa238353fb9-kube-api-access-h9tb9" (OuterVolumeSpecName: "kube-api-access-h9tb9") pod "f869fd75-6e46-42ac-80e4-ffa238353fb9" (UID: "f869fd75-6e46-42ac-80e4-ffa238353fb9"). InnerVolumeSpecName "kube-api-access-h9tb9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.588429 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9tb9\" (UniqueName: \"kubernetes.io/projected/f869fd75-6e46-42ac-80e4-ffa238353fb9-kube-api-access-h9tb9\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.792199 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-q4lh2"] Nov 25 09:53:34 crc kubenswrapper[4854]: I1125 09:53:34.798864 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-q4lh2"] Nov 25 09:53:35 crc kubenswrapper[4854]: I1125 09:53:35.026071 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f869fd75-6e46-42ac-80e4-ffa238353fb9" path="/var/lib/kubelet/pods/f869fd75-6e46-42ac-80e4-ffa238353fb9/volumes" Nov 25 09:53:40 crc kubenswrapper[4854]: I1125 09:53:40.306737 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:40 crc kubenswrapper[4854]: I1125 09:53:40.307367 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:40 crc kubenswrapper[4854]: I1125 09:53:40.336817 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:40 crc kubenswrapper[4854]: I1125 09:53:40.541955 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-68sqj" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.204404 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk"] Nov 25 09:53:42 crc kubenswrapper[4854]: E1125 09:53:42.206405 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f869fd75-6e46-42ac-80e4-ffa238353fb9" containerName="registry-server" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.206535 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f869fd75-6e46-42ac-80e4-ffa238353fb9" containerName="registry-server" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.207121 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f869fd75-6e46-42ac-80e4-ffa238353fb9" containerName="registry-server" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.209857 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.214355 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-8kccz" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.239602 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk"] Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.319949 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-util\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.320043 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm9bt\" (UniqueName: \"kubernetes.io/projected/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-kube-api-access-dm9bt\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.320087 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-bundle\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.421920 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-util\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.422037 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm9bt\" (UniqueName: \"kubernetes.io/projected/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-kube-api-access-dm9bt\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.422080 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-bundle\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.422443 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-util\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.422574 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-bundle\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.445558 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm9bt\" (UniqueName: \"kubernetes.io/projected/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-kube-api-access-dm9bt\") pod \"09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.533595 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:42 crc kubenswrapper[4854]: I1125 09:53:42.956150 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk"] Nov 25 09:53:43 crc kubenswrapper[4854]: I1125 09:53:43.537768 4854 generic.go:334] "Generic (PLEG): container finished" podID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerID="0774a182157e8ea7f505a3f5be607ba8b071a569c2c26bbaf82f3114e549a0b1" exitCode=0 Nov 25 09:53:43 crc kubenswrapper[4854]: I1125 09:53:43.537874 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" event={"ID":"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7","Type":"ContainerDied","Data":"0774a182157e8ea7f505a3f5be607ba8b071a569c2c26bbaf82f3114e549a0b1"} Nov 25 09:53:43 crc kubenswrapper[4854]: I1125 09:53:43.538188 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" event={"ID":"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7","Type":"ContainerStarted","Data":"bcf4841070150ea50b373c4bbfd0ebc6d8c664f082e34f937be431d249507726"} Nov 25 09:53:44 crc kubenswrapper[4854]: I1125 09:53:44.546552 4854 generic.go:334] "Generic (PLEG): container finished" podID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerID="477fd65a88cc32faf9b2e3a109f55398e3d97f9c38ba85c428de2744e602f979" exitCode=0 Nov 25 09:53:44 crc kubenswrapper[4854]: I1125 09:53:44.546589 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" event={"ID":"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7","Type":"ContainerDied","Data":"477fd65a88cc32faf9b2e3a109f55398e3d97f9c38ba85c428de2744e602f979"} Nov 25 09:53:45 crc kubenswrapper[4854]: I1125 09:53:45.558857 4854 generic.go:334] "Generic (PLEG): container finished" podID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerID="0a2a4700770da6cb7a268859d81c8aef64c12fc57e80e162690c15128e137276" exitCode=0 Nov 25 09:53:45 crc kubenswrapper[4854]: I1125 09:53:45.558968 4854 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" event={"ID":"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7","Type":"ContainerDied","Data":"0a2a4700770da6cb7a268859d81c8aef64c12fc57e80e162690c15128e137276"} Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.025983 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.110126 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-bundle\") pod \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.110176 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dm9bt\" (UniqueName: \"kubernetes.io/projected/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-kube-api-access-dm9bt\") pod \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.110317 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-util\") pod \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\" (UID: \"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7\") " Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.110938 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-bundle" (OuterVolumeSpecName: "bundle") pod "d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" (UID: "d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.115081 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-kube-api-access-dm9bt" (OuterVolumeSpecName: "kube-api-access-dm9bt") pod "d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" (UID: "d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7"). InnerVolumeSpecName "kube-api-access-dm9bt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.126328 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-util" (OuterVolumeSpecName: "util") pod "d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" (UID: "d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.212264 4854 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.212314 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dm9bt\" (UniqueName: \"kubernetes.io/projected/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-kube-api-access-dm9bt\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.212328 4854 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.577636 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" event={"ID":"d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7","Type":"ContainerDied","Data":"bcf4841070150ea50b373c4bbfd0ebc6d8c664f082e34f937be431d249507726"} Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.577706 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcf4841070150ea50b373c4bbfd0ebc6d8c664f082e34f937be431d249507726" Nov 25 09:53:47 crc kubenswrapper[4854]: I1125 09:53:47.577789 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.384800 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg"] Nov 25 09:53:54 crc kubenswrapper[4854]: E1125 09:53:54.385720 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerName="extract" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.385736 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerName="extract" Nov 25 09:53:54 crc kubenswrapper[4854]: E1125 09:53:54.385772 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerName="pull" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.385780 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerName="pull" Nov 25 09:53:54 crc kubenswrapper[4854]: E1125 09:53:54.385805 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerName="util" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.385813 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerName="util" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.385973 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7" containerName="extract" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.386652 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.388548 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-wgm6c" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.430314 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjllq\" (UniqueName: \"kubernetes.io/projected/61c604f4-d038-4563-b7a9-d14831788526-kube-api-access-cjllq\") pod \"openstack-operator-controller-operator-674d4d8cb8-wcrmg\" (UID: \"61c604f4-d038-4563-b7a9-d14831788526\") " pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.431650 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg"] Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.532346 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjllq\" (UniqueName: \"kubernetes.io/projected/61c604f4-d038-4563-b7a9-d14831788526-kube-api-access-cjllq\") pod \"openstack-operator-controller-operator-674d4d8cb8-wcrmg\" (UID: \"61c604f4-d038-4563-b7a9-d14831788526\") " pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.562999 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjllq\" (UniqueName: \"kubernetes.io/projected/61c604f4-d038-4563-b7a9-d14831788526-kube-api-access-cjllq\") pod \"openstack-operator-controller-operator-674d4d8cb8-wcrmg\" (UID: \"61c604f4-d038-4563-b7a9-d14831788526\") " pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" Nov 25 09:53:54 crc kubenswrapper[4854]: I1125 09:53:54.707823 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" Nov 25 09:53:55 crc kubenswrapper[4854]: I1125 09:53:55.029488 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg"] Nov 25 09:53:55 crc kubenswrapper[4854]: I1125 09:53:55.640308 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" event={"ID":"61c604f4-d038-4563-b7a9-d14831788526","Type":"ContainerStarted","Data":"3dd95b2a9434a8ae2e9d9d2d9da66bbc01b8441323b5b8073510077ea7f92c90"} Nov 25 09:54:00 crc kubenswrapper[4854]: I1125 09:54:00.688341 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" event={"ID":"61c604f4-d038-4563-b7a9-d14831788526","Type":"ContainerStarted","Data":"c363e744e88dfc2768126d2797ee69c63b0efe152de2f7fa8ff36b58fc8e7ef6"} Nov 25 09:54:00 crc kubenswrapper[4854]: I1125 09:54:00.688992 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" Nov 25 09:54:00 crc kubenswrapper[4854]: I1125 09:54:00.722858 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" podStartSLOduration=1.93765241 podStartE2EDuration="6.722830991s" podCreationTimestamp="2025-11-25 09:53:54 +0000 UTC" firstStartedPulling="2025-11-25 09:53:55.056259231 +0000 UTC m=+1040.909252607" lastFinishedPulling="2025-11-25 09:53:59.841437812 +0000 UTC m=+1045.694431188" observedRunningTime="2025-11-25 09:54:00.717127265 +0000 UTC m=+1046.570120641" watchObservedRunningTime="2025-11-25 09:54:00.722830991 +0000 UTC m=+1046.575824367" Nov 25 09:54:14 crc kubenswrapper[4854]: I1125 09:54:14.711569 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-674d4d8cb8-wcrmg" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.266207 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.269501 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.274656 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-qxgvc" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.276085 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.277412 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.279332 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-g5gfk" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.284404 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.306558 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.314253 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhjw8\" (UniqueName: \"kubernetes.io/projected/e0afee56-5768-44f9-af4d-da496a95ae39-kube-api-access-hhjw8\") pod \"cinder-operator-controller-manager-79856dc55c-vfv6r\" (UID: \"e0afee56-5768-44f9-af4d-da496a95ae39\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.314372 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9b2tr\" (UniqueName: \"kubernetes.io/projected/df2830c6-0e67-4aea-b14d-101a5323617b-kube-api-access-9b2tr\") pod \"barbican-operator-controller-manager-86dc4d89c8-pvjbb\" (UID: \"df2830c6-0e67-4aea-b14d-101a5323617b\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.353432 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.357114 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.362821 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-2zpwh" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.374670 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.383388 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.391576 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-ftqqp" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.410956 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.421179 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9b2tr\" (UniqueName: \"kubernetes.io/projected/df2830c6-0e67-4aea-b14d-101a5323617b-kube-api-access-9b2tr\") pod \"barbican-operator-controller-manager-86dc4d89c8-pvjbb\" (UID: \"df2830c6-0e67-4aea-b14d-101a5323617b\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.421346 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jckcp\" (UniqueName: \"kubernetes.io/projected/5a9dc90e-3728-42c9-897c-7b28035196bd-kube-api-access-jckcp\") pod \"designate-operator-controller-manager-7d695c9b56-bjjdt\" (UID: \"5a9dc90e-3728-42c9-897c-7b28035196bd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.421380 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhjw8\" (UniqueName: \"kubernetes.io/projected/e0afee56-5768-44f9-af4d-da496a95ae39-kube-api-access-hhjw8\") pod \"cinder-operator-controller-manager-79856dc55c-vfv6r\" (UID: \"e0afee56-5768-44f9-af4d-da496a95ae39\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.421418 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szvmv\" (UniqueName: \"kubernetes.io/projected/5ed548fa-16b5-4733-b798-a0819bc8e77d-kube-api-access-szvmv\") pod \"glance-operator-controller-manager-68b95954c9-n99lm\" (UID: \"5ed548fa-16b5-4733-b798-a0819bc8e77d\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.459506 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9b2tr\" (UniqueName: \"kubernetes.io/projected/df2830c6-0e67-4aea-b14d-101a5323617b-kube-api-access-9b2tr\") pod \"barbican-operator-controller-manager-86dc4d89c8-pvjbb\" (UID: \"df2830c6-0e67-4aea-b14d-101a5323617b\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.462806 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.463718 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhjw8\" (UniqueName: \"kubernetes.io/projected/e0afee56-5768-44f9-af4d-da496a95ae39-kube-api-access-hhjw8\") pod \"cinder-operator-controller-manager-79856dc55c-vfv6r\" (UID: \"e0afee56-5768-44f9-af4d-da496a95ae39\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 
09:54:32.472909 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.474086 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.497522 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-dks6d" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.523339 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szvmv\" (UniqueName: \"kubernetes.io/projected/5ed548fa-16b5-4733-b798-a0819bc8e77d-kube-api-access-szvmv\") pod \"glance-operator-controller-manager-68b95954c9-n99lm\" (UID: \"5ed548fa-16b5-4733-b798-a0819bc8e77d\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.523488 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whgpn\" (UniqueName: \"kubernetes.io/projected/1da6b327-2319-4a92-9555-736d992a3348-kube-api-access-whgpn\") pod \"heat-operator-controller-manager-774b86978c-zp5bz\" (UID: \"1da6b327-2319-4a92-9555-736d992a3348\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.523582 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jckcp\" (UniqueName: \"kubernetes.io/projected/5a9dc90e-3728-42c9-897c-7b28035196bd-kube-api-access-jckcp\") pod \"designate-operator-controller-manager-7d695c9b56-bjjdt\" (UID: \"5a9dc90e-3728-42c9-897c-7b28035196bd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.530527 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.560134 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.561751 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.566361 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-t45ds" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.566609 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.567082 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szvmv\" (UniqueName: \"kubernetes.io/projected/5ed548fa-16b5-4733-b798-a0819bc8e77d-kube-api-access-szvmv\") pod \"glance-operator-controller-manager-68b95954c9-n99lm\" (UID: \"5ed548fa-16b5-4733-b798-a0819bc8e77d\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.576023 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jckcp\" (UniqueName: \"kubernetes.io/projected/5a9dc90e-3728-42c9-897c-7b28035196bd-kube-api-access-jckcp\") pod \"designate-operator-controller-manager-7d695c9b56-bjjdt\" (UID: \"5a9dc90e-3728-42c9-897c-7b28035196bd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.583537 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.585516 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.591446 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-jjzjv" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.598347 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.602318 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.604021 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.620404 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-nlfhf" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.671016 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.672976 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whgpn\" (UniqueName: \"kubernetes.io/projected/1da6b327-2319-4a92-9555-736d992a3348-kube-api-access-whgpn\") pod \"heat-operator-controller-manager-774b86978c-zp5bz\" (UID: \"1da6b327-2319-4a92-9555-736d992a3348\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.673047 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkx99\" (UniqueName: \"kubernetes.io/projected/77de3154-620e-407a-97e9-94d3dd90ced7-kube-api-access-hkx99\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.673083 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7csl\" (UniqueName: \"kubernetes.io/projected/13c658f9-13c2-43d5-9b8a-d30484e5943f-kube-api-access-n7csl\") pod \"horizon-operator-controller-manager-68c9694994-jx54m\" (UID: \"13c658f9-13c2-43d5-9b8a-d30484e5943f\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.673124 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.682406 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.782073 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.785703 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.785791 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgchd\" (UniqueName: \"kubernetes.io/projected/5c0186e4-d72a-4281-ab41-d012f2d4d775-kube-api-access-pgchd\") pod \"ironic-operator-controller-manager-5bfcdc958c-7l4fm\" (UID: \"5c0186e4-d72a-4281-ab41-d012f2d4d775\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.785860 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whgpn\" (UniqueName: \"kubernetes.io/projected/1da6b327-2319-4a92-9555-736d992a3348-kube-api-access-whgpn\") pod \"heat-operator-controller-manager-774b86978c-zp5bz\" (UID: \"1da6b327-2319-4a92-9555-736d992a3348\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.785903 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkx99\" (UniqueName: \"kubernetes.io/projected/77de3154-620e-407a-97e9-94d3dd90ced7-kube-api-access-hkx99\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.786042 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7csl\" (UniqueName: \"kubernetes.io/projected/13c658f9-13c2-43d5-9b8a-d30484e5943f-kube-api-access-n7csl\") pod \"horizon-operator-controller-manager-68c9694994-jx54m\" (UID: \"13c658f9-13c2-43d5-9b8a-d30484e5943f\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" Nov 25 09:54:32 crc kubenswrapper[4854]: E1125 09:54:32.786258 4854 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 09:54:32 crc kubenswrapper[4854]: E1125 09:54:32.786321 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert podName:77de3154-620e-407a-97e9-94d3dd90ced7 nodeName:}" failed. No retries permitted until 2025-11-25 09:54:33.286298651 +0000 UTC m=+1079.139292107 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert") pod "infra-operator-controller-manager-d5cc86f4b-n9bk6" (UID: "77de3154-620e-407a-97e9-94d3dd90ced7") : secret "infra-operator-webhook-server-cert" not found Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.793660 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.805208 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.811514 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkx99\" (UniqueName: \"kubernetes.io/projected/77de3154-620e-407a-97e9-94d3dd90ced7-kube-api-access-hkx99\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.813391 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7csl\" (UniqueName: \"kubernetes.io/projected/13c658f9-13c2-43d5-9b8a-d30484e5943f-kube-api-access-n7csl\") pod \"horizon-operator-controller-manager-68c9694994-jx54m\" (UID: \"13c658f9-13c2-43d5-9b8a-d30484e5943f\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.817362 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.822303 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-2tsmm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.823317 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.833262 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.846447 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.852168 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.871102 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.877958 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.889256 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qr4mr\" (UniqueName: \"kubernetes.io/projected/d51e017c-9c85-443a-a5b9-b8b8969bb019-kube-api-access-qr4mr\") pod \"keystone-operator-controller-manager-748dc6576f-sks98\" (UID: \"d51e017c-9c85-443a-a5b9-b8b8969bb019\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.889334 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgchd\" (UniqueName: \"kubernetes.io/projected/5c0186e4-d72a-4281-ab41-d012f2d4d775-kube-api-access-pgchd\") pod \"ironic-operator-controller-manager-5bfcdc958c-7l4fm\" (UID: \"5c0186e4-d72a-4281-ab41-d012f2d4d775\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.899117 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.899993 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-m8bzh" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.901502 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.905905 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-6cjjq" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.921082 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.925271 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgchd\" (UniqueName: \"kubernetes.io/projected/5c0186e4-d72a-4281-ab41-d012f2d4d775-kube-api-access-pgchd\") pod \"ironic-operator-controller-manager-5bfcdc958c-7l4fm\" (UID: \"5c0186e4-d72a-4281-ab41-d012f2d4d775\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.928713 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.935558 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-bphvc" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.941715 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.952735 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.991021 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qr4mr\" (UniqueName: \"kubernetes.io/projected/d51e017c-9c85-443a-a5b9-b8b8969bb019-kube-api-access-qr4mr\") pod \"keystone-operator-controller-manager-748dc6576f-sks98\" (UID: \"d51e017c-9c85-443a-a5b9-b8b8969bb019\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.991114 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4q77\" (UniqueName: \"kubernetes.io/projected/10550c56-b051-4d65-a13c-3854e40d2869-kube-api-access-q4q77\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-dlq5q\" (UID: \"10550c56-b051-4d65-a13c-3854e40d2869\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.991140 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltbx2\" (UniqueName: \"kubernetes.io/projected/7cff163e-525b-4280-ab48-35eb6d6dd242-kube-api-access-ltbx2\") pod \"manila-operator-controller-manager-58bb8d67cc-4tvd5\" (UID: \"7cff163e-525b-4280-ab48-35eb6d6dd242\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.991484 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh"] Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.994134 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.997128 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-24ww2" Nov 25 09:54:32 crc kubenswrapper[4854]: I1125 09:54:32.998990 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.014228 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.031441 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qr4mr\" (UniqueName: \"kubernetes.io/projected/d51e017c-9c85-443a-a5b9-b8b8969bb019-kube-api-access-qr4mr\") pod \"keystone-operator-controller-manager-748dc6576f-sks98\" (UID: \"d51e017c-9c85-443a-a5b9-b8b8969bb019\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.047243 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.049500 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.074185 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-vk7px" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.086181 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.094022 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwkbv\" (UniqueName: \"kubernetes.io/projected/2b0d30d6-6825-4d29-b5bb-9ea86f790b6f-kube-api-access-jwkbv\") pod \"neutron-operator-controller-manager-7c57c8bbc4-9nqtc\" (UID: \"2b0d30d6-6825-4d29-b5bb-9ea86f790b6f\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.094059 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4q77\" (UniqueName: \"kubernetes.io/projected/10550c56-b051-4d65-a13c-3854e40d2869-kube-api-access-q4q77\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-dlq5q\" (UID: \"10550c56-b051-4d65-a13c-3854e40d2869\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.094079 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcvh2\" (UniqueName: \"kubernetes.io/projected/e3e51685-6e25-4bcd-8194-4faf0947962a-kube-api-access-jcvh2\") pod \"nova-operator-controller-manager-79556f57fc-7bgzh\" (UID: \"e3e51685-6e25-4bcd-8194-4faf0947962a\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.094100 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltbx2\" (UniqueName: \"kubernetes.io/projected/7cff163e-525b-4280-ab48-35eb6d6dd242-kube-api-access-ltbx2\") pod \"manila-operator-controller-manager-58bb8d67cc-4tvd5\" (UID: \"7cff163e-525b-4280-ab48-35eb6d6dd242\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.111341 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.116250 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.145621 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltbx2\" (UniqueName: \"kubernetes.io/projected/7cff163e-525b-4280-ab48-35eb6d6dd242-kube-api-access-ltbx2\") pod \"manila-operator-controller-manager-58bb8d67cc-4tvd5\" (UID: \"7cff163e-525b-4280-ab48-35eb6d6dd242\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.161387 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4q77\" (UniqueName: \"kubernetes.io/projected/10550c56-b051-4d65-a13c-3854e40d2869-kube-api-access-q4q77\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-dlq5q\" (UID: \"10550c56-b051-4d65-a13c-3854e40d2869\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.161806 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.186892 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.196353 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwkbv\" (UniqueName: \"kubernetes.io/projected/2b0d30d6-6825-4d29-b5bb-9ea86f790b6f-kube-api-access-jwkbv\") pod \"neutron-operator-controller-manager-7c57c8bbc4-9nqtc\" (UID: \"2b0d30d6-6825-4d29-b5bb-9ea86f790b6f\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.196407 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcvh2\" (UniqueName: \"kubernetes.io/projected/e3e51685-6e25-4bcd-8194-4faf0947962a-kube-api-access-jcvh2\") pod \"nova-operator-controller-manager-79556f57fc-7bgzh\" (UID: \"e3e51685-6e25-4bcd-8194-4faf0947962a\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.196476 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmctq\" (UniqueName: \"kubernetes.io/projected/bcafa8f9-8abb-4015-8afc-6767b0485ad8-kube-api-access-xmctq\") pod \"octavia-operator-controller-manager-fd75fd47d-wcjpf\" (UID: \"bcafa8f9-8abb-4015-8afc-6767b0485ad8\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.200352 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.203511 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.203865 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-xh9hw" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.213940 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.225929 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.233652 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-6d8wr" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.258879 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.261258 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwkbv\" (UniqueName: \"kubernetes.io/projected/2b0d30d6-6825-4d29-b5bb-9ea86f790b6f-kube-api-access-jwkbv\") pod \"neutron-operator-controller-manager-7c57c8bbc4-9nqtc\" (UID: \"2b0d30d6-6825-4d29-b5bb-9ea86f790b6f\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.263250 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcvh2\" (UniqueName: \"kubernetes.io/projected/e3e51685-6e25-4bcd-8194-4faf0947962a-kube-api-access-jcvh2\") pod \"nova-operator-controller-manager-79556f57fc-7bgzh\" (UID: \"e3e51685-6e25-4bcd-8194-4faf0947962a\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.277512 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.289726 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.290961 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.297128 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-n2h6m" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.298525 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.298622 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cn5d\" (UniqueName: \"kubernetes.io/projected/62c72ef0-d6f3-4b75-aa57-43d934de39e9-kube-api-access-2cn5d\") pod \"ovn-operator-controller-manager-66cf5c67ff-tcblk\" (UID: \"62c72ef0-d6f3-4b75-aa57-43d934de39e9\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.298651 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7jjg\" (UniqueName: \"kubernetes.io/projected/0ef84c87-7162-45bb-9622-48b2b37e50bd-kube-api-access-q7jjg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz\" (UID: \"0ef84c87-7162-45bb-9622-48b2b37e50bd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.298686 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmctq\" (UniqueName: \"kubernetes.io/projected/bcafa8f9-8abb-4015-8afc-6767b0485ad8-kube-api-access-xmctq\") pod 
\"octavia-operator-controller-manager-fd75fd47d-wcjpf\" (UID: \"bcafa8f9-8abb-4015-8afc-6767b0485ad8\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.298733 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ef84c87-7162-45bb-9622-48b2b37e50bd-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz\" (UID: \"0ef84c87-7162-45bb-9622-48b2b37e50bd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: E1125 09:54:33.298850 4854 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 09:54:33 crc kubenswrapper[4854]: E1125 09:54:33.298884 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert podName:77de3154-620e-407a-97e9-94d3dd90ced7 nodeName:}" failed. No retries permitted until 2025-11-25 09:54:34.298871552 +0000 UTC m=+1080.151864918 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert") pod "infra-operator-controller-manager-d5cc86f4b-n9bk6" (UID: "77de3154-620e-407a-97e9-94d3dd90ced7") : secret "infra-operator-webhook-server-cert" not found Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.309474 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.332581 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmctq\" (UniqueName: \"kubernetes.io/projected/bcafa8f9-8abb-4015-8afc-6767b0485ad8-kube-api-access-xmctq\") pod \"octavia-operator-controller-manager-fd75fd47d-wcjpf\" (UID: \"bcafa8f9-8abb-4015-8afc-6767b0485ad8\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.335583 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.344265 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.381590 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.405160 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cn5d\" (UniqueName: \"kubernetes.io/projected/62c72ef0-d6f3-4b75-aa57-43d934de39e9-kube-api-access-2cn5d\") pod \"ovn-operator-controller-manager-66cf5c67ff-tcblk\" (UID: \"62c72ef0-d6f3-4b75-aa57-43d934de39e9\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.405217 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7jjg\" (UniqueName: \"kubernetes.io/projected/0ef84c87-7162-45bb-9622-48b2b37e50bd-kube-api-access-q7jjg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz\" (UID: \"0ef84c87-7162-45bb-9622-48b2b37e50bd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.405270 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ef84c87-7162-45bb-9622-48b2b37e50bd-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz\" (UID: \"0ef84c87-7162-45bb-9622-48b2b37e50bd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.405341 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2n88\" (UniqueName: \"kubernetes.io/projected/c0ebacaa-bc16-4876-87cb-011f523a59a4-kube-api-access-k2n88\") pod \"placement-operator-controller-manager-5db546f9d9-v88zs\" (UID: \"c0ebacaa-bc16-4876-87cb-011f523a59a4\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" Nov 25 09:54:33 crc kubenswrapper[4854]: E1125 09:54:33.405923 4854 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 09:54:33 crc kubenswrapper[4854]: E1125 09:54:33.405961 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ef84c87-7162-45bb-9622-48b2b37e50bd-cert podName:0ef84c87-7162-45bb-9622-48b2b37e50bd nodeName:}" failed. No retries permitted until 2025-11-25 09:54:33.905948209 +0000 UTC m=+1079.758941585 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0ef84c87-7162-45bb-9622-48b2b37e50bd-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" (UID: "0ef84c87-7162-45bb-9622-48b2b37e50bd") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.420020 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.422939 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.424297 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.442098 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-z24gj" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.445801 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.455972 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cn5d\" (UniqueName: \"kubernetes.io/projected/62c72ef0-d6f3-4b75-aa57-43d934de39e9-kube-api-access-2cn5d\") pod \"ovn-operator-controller-manager-66cf5c67ff-tcblk\" (UID: \"62c72ef0-d6f3-4b75-aa57-43d934de39e9\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.464214 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7jjg\" (UniqueName: \"kubernetes.io/projected/0ef84c87-7162-45bb-9622-48b2b37e50bd-kube-api-access-q7jjg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz\" (UID: \"0ef84c87-7162-45bb-9622-48b2b37e50bd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.505063 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.510253 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-754vx\" (UniqueName: \"kubernetes.io/projected/17936560-9d8d-4c29-b12a-451abdc2787a-kube-api-access-754vx\") pod \"swift-operator-controller-manager-6fdc4fcf86-rwp8j\" (UID: \"17936560-9d8d-4c29-b12a-451abdc2787a\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.510430 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2n88\" (UniqueName: \"kubernetes.io/projected/c0ebacaa-bc16-4876-87cb-011f523a59a4-kube-api-access-k2n88\") pod \"placement-operator-controller-manager-5db546f9d9-v88zs\" (UID: \"c0ebacaa-bc16-4876-87cb-011f523a59a4\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.539748 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.541564 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.546162 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-6jjqh" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.571817 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2n88\" (UniqueName: \"kubernetes.io/projected/c0ebacaa-bc16-4876-87cb-011f523a59a4-kube-api-access-k2n88\") pod \"placement-operator-controller-manager-5db546f9d9-v88zs\" (UID: \"c0ebacaa-bc16-4876-87cb-011f523a59a4\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.572191 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-vddrp"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.589121 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.600741 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.603085 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-gh7kl" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.612892 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-754vx\" (UniqueName: \"kubernetes.io/projected/17936560-9d8d-4c29-b12a-451abdc2787a-kube-api-access-754vx\") pod \"swift-operator-controller-manager-6fdc4fcf86-rwp8j\" (UID: \"17936560-9d8d-4c29-b12a-451abdc2787a\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.612972 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzpgf\" (UniqueName: \"kubernetes.io/projected/91d4e118-8133-4531-8e70-afb240453f11-kube-api-access-dzpgf\") pod \"telemetry-operator-controller-manager-67b89c8998-d98c4\" (UID: \"91d4e118-8133-4531-8e70-afb240453f11\") " pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.666984 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.699214 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-vddrp"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.712546 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-754vx\" (UniqueName: \"kubernetes.io/projected/17936560-9d8d-4c29-b12a-451abdc2787a-kube-api-access-754vx\") pod \"swift-operator-controller-manager-6fdc4fcf86-rwp8j\" (UID: \"17936560-9d8d-4c29-b12a-451abdc2787a\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.725554 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-zbx7f\" (UniqueName: \"kubernetes.io/projected/0e528244-b216-46fa-95a8-8b0faf6a50df-kube-api-access-zbx7f\") pod \"test-operator-controller-manager-5cb74df96-vddrp\" (UID: \"0e528244-b216-46fa-95a8-8b0faf6a50df\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.725801 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzpgf\" (UniqueName: \"kubernetes.io/projected/91d4e118-8133-4531-8e70-afb240453f11-kube-api-access-dzpgf\") pod \"telemetry-operator-controller-manager-67b89c8998-d98c4\" (UID: \"91d4e118-8133-4531-8e70-afb240453f11\") " pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.731265 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.753221 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-9pstc"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.754691 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.759823 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-lf84j" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.776986 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzpgf\" (UniqueName: \"kubernetes.io/projected/91d4e118-8133-4531-8e70-afb240453f11-kube-api-access-dzpgf\") pod \"telemetry-operator-controller-manager-67b89c8998-d98c4\" (UID: \"91d4e118-8133-4531-8e70-afb240453f11\") " pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.789000 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-9pstc"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.830147 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbx7f\" (UniqueName: \"kubernetes.io/projected/0e528244-b216-46fa-95a8-8b0faf6a50df-kube-api-access-zbx7f\") pod \"test-operator-controller-manager-5cb74df96-vddrp\" (UID: \"0e528244-b216-46fa-95a8-8b0faf6a50df\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.856711 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbx7f\" (UniqueName: \"kubernetes.io/projected/0e528244-b216-46fa-95a8-8b0faf6a50df-kube-api-access-zbx7f\") pod \"test-operator-controller-manager-5cb74df96-vddrp\" (UID: \"0e528244-b216-46fa-95a8-8b0faf6a50df\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.872213 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.873770 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.877712 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.879923 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.881034 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-pkgng" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.897839 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.920990 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.922492 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.926787 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-mnbgt" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.931961 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ef84c87-7162-45bb-9622-48b2b37e50bd-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz\" (UID: \"0ef84c87-7162-45bb-9622-48b2b37e50bd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.932111 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mllbf\" (UniqueName: \"kubernetes.io/projected/0e7943a0-c710-4440-a8be-932c12cfd4de-kube-api-access-mllbf\") pod \"watcher-operator-controller-manager-864885998-9pstc\" (UID: \"0e7943a0-c710-4440-a8be-932c12cfd4de\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.936463 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0ef84c87-7162-45bb-9622-48b2b37e50bd-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz\" (UID: \"0ef84c87-7162-45bb-9622-48b2b37e50bd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.945509 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.972265 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" event={"ID":"5ed548fa-16b5-4733-b798-a0819bc8e77d","Type":"ContainerStarted","Data":"0e18686fe01e00f9c3a43b197473b8244db4cdab8e3a4847073912e88dc86bbf"} Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.978027 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r"] Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.979019 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" event={"ID":"e0afee56-5768-44f9-af4d-da496a95ae39","Type":"ContainerStarted","Data":"c1f10bc67678fba36d65d13ec9eabad814fa9b622da4507ded4203e6f7f257ef"} Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.979866 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" event={"ID":"df2830c6-0e67-4aea-b14d-101a5323617b","Type":"ContainerStarted","Data":"9b120f713b20e0fe15adc75c2d6a649584c7ae1c1a4e6359e0fd851423b210dc"} Nov 25 09:54:33 crc kubenswrapper[4854]: I1125 09:54:33.997474 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb"] Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.000596 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.037275 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mllbf\" (UniqueName: \"kubernetes.io/projected/0e7943a0-c710-4440-a8be-932c12cfd4de-kube-api-access-mllbf\") pod \"watcher-operator-controller-manager-864885998-9pstc\" (UID: \"0e7943a0-c710-4440-a8be-932c12cfd4de\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.037354 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.037382 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhd7z\" (UniqueName: \"kubernetes.io/projected/3879e5ff-7566-4cd8-bcac-e8c07a79f965-kube-api-access-mhd7z\") pod \"rabbitmq-cluster-operator-manager-668c99d594-gwfmz\" (UID: \"3879e5ff-7566-4cd8-bcac-e8c07a79f965\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.037442 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrbkt\" (UniqueName: \"kubernetes.io/projected/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-kube-api-access-vrbkt\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.037474 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 
09:54:34 crc kubenswrapper[4854]: W1125 09:54:34.046896 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1da6b327_2319_4a92_9555_736d992a3348.slice/crio-41839fb64a678ee36589e9d383193163dd78c52527fdc84d8d151227017e6942 WatchSource:0}: Error finding container 41839fb64a678ee36589e9d383193163dd78c52527fdc84d8d151227017e6942: Status 404 returned error can't find the container with id 41839fb64a678ee36589e9d383193163dd78c52527fdc84d8d151227017e6942 Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.057066 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mllbf\" (UniqueName: \"kubernetes.io/projected/0e7943a0-c710-4440-a8be-932c12cfd4de-kube-api-access-mllbf\") pod \"watcher-operator-controller-manager-864885998-9pstc\" (UID: \"0e7943a0-c710-4440-a8be-932c12cfd4de\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.063090 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.063553 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm"] Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.090093 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz"] Nov 25 09:54:34 crc kubenswrapper[4854]: W1125 09:54:34.093197 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a9dc90e_3728_42c9_897c_7b28035196bd.slice/crio-a62a9f16e04d2e22b53681bf7b33e070ed81db1f9d76b91c67d743bd5bb49a8d WatchSource:0}: Error finding container a62a9f16e04d2e22b53681bf7b33e070ed81db1f9d76b91c67d743bd5bb49a8d: Status 404 returned error can't find the container with id a62a9f16e04d2e22b53681bf7b33e070ed81db1f9d76b91c67d743bd5bb49a8d Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.100180 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt"] Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.111602 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.137186 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.138395 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.138428 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhd7z\" (UniqueName: \"kubernetes.io/projected/3879e5ff-7566-4cd8-bcac-e8c07a79f965-kube-api-access-mhd7z\") pod \"rabbitmq-cluster-operator-manager-668c99d594-gwfmz\" (UID: \"3879e5ff-7566-4cd8-bcac-e8c07a79f965\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.138479 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrbkt\" (UniqueName: \"kubernetes.io/projected/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-kube-api-access-vrbkt\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.138511 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.138647 4854 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.138841 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs podName:21f043af-1d20-48fd-a8eb-40cdbee6ab8c nodeName:}" failed. No retries permitted until 2025-11-25 09:54:34.638825825 +0000 UTC m=+1080.491819201 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs") pod "openstack-operator-controller-manager-848ff5c487-2kbtl" (UID: "21f043af-1d20-48fd-a8eb-40cdbee6ab8c") : secret "webhook-server-cert" not found Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.141269 4854 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.141376 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs podName:21f043af-1d20-48fd-a8eb-40cdbee6ab8c nodeName:}" failed. No retries permitted until 2025-11-25 09:54:34.641353084 +0000 UTC m=+1080.494346520 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs") pod "openstack-operator-controller-manager-848ff5c487-2kbtl" (UID: "21f043af-1d20-48fd-a8eb-40cdbee6ab8c") : secret "metrics-server-cert" not found Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.158978 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhd7z\" (UniqueName: \"kubernetes.io/projected/3879e5ff-7566-4cd8-bcac-e8c07a79f965-kube-api-access-mhd7z\") pod \"rabbitmq-cluster-operator-manager-668c99d594-gwfmz\" (UID: \"3879e5ff-7566-4cd8-bcac-e8c07a79f965\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.159078 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrbkt\" (UniqueName: \"kubernetes.io/projected/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-kube-api-access-vrbkt\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.159438 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.204230 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m"] Nov 25 09:54:34 crc kubenswrapper[4854]: W1125 09:54:34.227398 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13c658f9_13c2_43d5_9b8a_d30484e5943f.slice/crio-c2828e5d6cc9551819380cbd9fce2ed2da12b63761c1c37fb0e81356ba8790de WatchSource:0}: Error finding container c2828e5d6cc9551819380cbd9fce2ed2da12b63761c1c37fb0e81356ba8790de: Status 404 returned error can't find the container with id c2828e5d6cc9551819380cbd9fce2ed2da12b63761c1c37fb0e81356ba8790de Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.253072 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.346791 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.355962 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77de3154-620e-407a-97e9-94d3dd90ced7-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-n9bk6\" (UID: \"77de3154-620e-407a-97e9-94d3dd90ced7\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.483033 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98"] Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.509949 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q"] Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.517147 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm"] Nov 25 09:54:34 crc kubenswrapper[4854]: W1125 09:54:34.523024 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c0186e4_d72a_4281_ab41_d012f2d4d775.slice/crio-75ef071d57a73a478c171cd30262eec9d1b65b955fc9724acb2eb6a620fe767d WatchSource:0}: Error finding container 75ef071d57a73a478c171cd30262eec9d1b65b955fc9724acb2eb6a620fe767d: Status 404 returned error can't find the container with id 75ef071d57a73a478c171cd30262eec9d1b65b955fc9724acb2eb6a620fe767d Nov 25 09:54:34 crc kubenswrapper[4854]: W1125 09:54:34.528582 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10550c56_b051_4d65_a13c_3854e40d2869.slice/crio-c6af33a7a1bd69904aa6b1a5e358676033dc0b25e5736210a3108daa9047aed8 WatchSource:0}: Error finding container c6af33a7a1bd69904aa6b1a5e358676033dc0b25e5736210a3108daa9047aed8: Status 404 returned error can't find the container with id c6af33a7a1bd69904aa6b1a5e358676033dc0b25e5736210a3108daa9047aed8 Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.543289 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.653752 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: I1125 09:54:34.653935 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.654049 4854 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.654155 4854 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.654511 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs podName:21f043af-1d20-48fd-a8eb-40cdbee6ab8c nodeName:}" failed. No retries permitted until 2025-11-25 09:54:35.65409672 +0000 UTC m=+1081.507090096 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs") pod "openstack-operator-controller-manager-848ff5c487-2kbtl" (UID: "21f043af-1d20-48fd-a8eb-40cdbee6ab8c") : secret "webhook-server-cert" not found Nov 25 09:54:34 crc kubenswrapper[4854]: E1125 09:54:34.654543 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs podName:21f043af-1d20-48fd-a8eb-40cdbee6ab8c nodeName:}" failed. No retries permitted until 2025-11-25 09:54:35.654532832 +0000 UTC m=+1081.507526208 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs") pod "openstack-operator-controller-manager-848ff5c487-2kbtl" (UID: "21f043af-1d20-48fd-a8eb-40cdbee6ab8c") : secret "metrics-server-cert" not found Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.005122 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" event={"ID":"d51e017c-9c85-443a-a5b9-b8b8969bb019","Type":"ContainerStarted","Data":"8e0b848f50a941a1ebc3317f1ac096b46f3abd7e81b952525f66779af7e84ce1"} Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.007116 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" event={"ID":"1da6b327-2319-4a92-9555-736d992a3348","Type":"ContainerStarted","Data":"41839fb64a678ee36589e9d383193163dd78c52527fdc84d8d151227017e6942"} Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.008511 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" event={"ID":"5c0186e4-d72a-4281-ab41-d012f2d4d775","Type":"ContainerStarted","Data":"75ef071d57a73a478c171cd30262eec9d1b65b955fc9724acb2eb6a620fe767d"} Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.011364 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" event={"ID":"10550c56-b051-4d65-a13c-3854e40d2869","Type":"ContainerStarted","Data":"c6af33a7a1bd69904aa6b1a5e358676033dc0b25e5736210a3108daa9047aed8"} Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.047534 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" event={"ID":"5a9dc90e-3728-42c9-897c-7b28035196bd","Type":"ContainerStarted","Data":"a62a9f16e04d2e22b53681bf7b33e070ed81db1f9d76b91c67d743bd5bb49a8d"} Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.047596 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" event={"ID":"13c658f9-13c2-43d5-9b8a-d30484e5943f","Type":"ContainerStarted","Data":"c2828e5d6cc9551819380cbd9fce2ed2da12b63761c1c37fb0e81356ba8790de"} Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.232733 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.244354 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.287289 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.305432 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.367826 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.393240 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.582206 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.616782 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.644798 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-9pstc"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.684965 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-vddrp"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.692136 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.692427 4854 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.692509 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs podName:21f043af-1d20-48fd-a8eb-40cdbee6ab8c nodeName:}" failed. No retries permitted until 2025-11-25 09:54:37.692489416 +0000 UTC m=+1083.545482802 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs") pod "openstack-operator-controller-manager-848ff5c487-2kbtl" (UID: "21f043af-1d20-48fd-a8eb-40cdbee6ab8c") : secret "metrics-server-cert" not found Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.692695 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.700436 4854 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.700516 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs podName:21f043af-1d20-48fd-a8eb-40cdbee6ab8c nodeName:}" failed. No retries permitted until 2025-11-25 09:54:37.700495635 +0000 UTC m=+1083.553489011 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs") pod "openstack-operator-controller-manager-848ff5c487-2kbtl" (UID: "21f043af-1d20-48fd-a8eb-40cdbee6ab8c") : secret "webhook-server-cert" not found Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.758745 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.772381 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6"] Nov 25 09:54:35 crc kubenswrapper[4854]: I1125 09:54:35.790277 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz"] Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.808573 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mhd7z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-gwfmz_openstack-operators(3879e5ff-7566-4cd8-bcac-e8c07a79f965): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.809197 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_I
MAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELAT
ED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-an
telope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMA
GE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q7jjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz_openstack-operators(0ef84c87-7162-45bb-9622-48b2b37e50bd): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.809779 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" podUID="3879e5ff-7566-4cd8-bcac-e8c07a79f965" Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.815915 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q7jjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz_openstack-operators(0ef84c87-7162-45bb-9622-48b2b37e50bd): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:54:35 crc kubenswrapper[4854]: E1125 09:54:35.817186 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" podUID="0ef84c87-7162-45bb-9622-48b2b37e50bd" Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.047846 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" event={"ID":"c0ebacaa-bc16-4876-87cb-011f523a59a4","Type":"ContainerStarted","Data":"906cf4c834b4e9376f70583220e0a8b68f63fb644f02add3093d788693bf0863"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.058518 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" event={"ID":"91d4e118-8133-4531-8e70-afb240453f11","Type":"ContainerStarted","Data":"7bd938a3a6fea7d462154211ec4aaee53737e3c43fa6a627d5742ddee56c4ba2"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.061376 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" event={"ID":"3879e5ff-7566-4cd8-bcac-e8c07a79f965","Type":"ContainerStarted","Data":"23a0ca3073418ae73306728ffcc6a33382b9867f1db47c296dc254155dedb4de"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.065465 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" event={"ID":"0e7943a0-c710-4440-a8be-932c12cfd4de","Type":"ContainerStarted","Data":"efeb8958c55ca2cbc132b906c3d1630042da5044b22f097afe0bfa8a5db749bc"} Nov 25 09:54:36 crc kubenswrapper[4854]: E1125 09:54:36.065799 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" podUID="3879e5ff-7566-4cd8-bcac-e8c07a79f965" Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.091451 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" event={"ID":"bcafa8f9-8abb-4015-8afc-6767b0485ad8","Type":"ContainerStarted","Data":"cb4056c40ed1a6ebb3806ed82f4bf3877913b10d19ff2ee159a075aeab5decd3"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.092984 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" event={"ID":"2b0d30d6-6825-4d29-b5bb-9ea86f790b6f","Type":"ContainerStarted","Data":"e29ef73b9ac9783268379b44edeb185548a4917b30f80adc97561b2870c11233"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.095037 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" event={"ID":"7cff163e-525b-4280-ab48-35eb6d6dd242","Type":"ContainerStarted","Data":"b88b699b4f3bddf3ab76e28179b048992e7890cec9ed4af73e6063c7c8e7b904"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.109985 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" event={"ID":"e3e51685-6e25-4bcd-8194-4faf0947962a","Type":"ContainerStarted","Data":"0216a66ff775e8518713e1f806fb153abc889df77c5b84b84b1897c97e0b8cfa"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.136917 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" event={"ID":"62c72ef0-d6f3-4b75-aa57-43d934de39e9","Type":"ContainerStarted","Data":"84c4e42ff5f9bda6a83a87fcd98aad952bd2ff881fa6d590b2ac0c71b0780bf0"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.138515 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" event={"ID":"0ef84c87-7162-45bb-9622-48b2b37e50bd","Type":"ContainerStarted","Data":"8cbb5b6719cbbe7f261a38d951d57992d2e9368c20fd0cbe6c7bfc5f062e1ba6"} Nov 25 09:54:36 crc kubenswrapper[4854]: E1125 09:54:36.155259 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" podUID="0ef84c87-7162-45bb-9622-48b2b37e50bd" Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.173628 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" event={"ID":"17936560-9d8d-4c29-b12a-451abdc2787a","Type":"ContainerStarted","Data":"e7426e91a0171765a0142d9d80e4b5b73262b0c2f5268b884844b0ea5e724108"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.184868 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" event={"ID":"77de3154-620e-407a-97e9-94d3dd90ced7","Type":"ContainerStarted","Data":"7759b6e676badbf44c7b481f2853f47b5552ec401cbe671cdd09f38b52ee4ffe"} Nov 25 09:54:36 crc kubenswrapper[4854]: I1125 09:54:36.209079 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" 
event={"ID":"0e528244-b216-46fa-95a8-8b0faf6a50df","Type":"ContainerStarted","Data":"a4fabe2c457a5414d2217bdbad1e71db7d091bda51594c8dc5c31293b0c6efe6"} Nov 25 09:54:37 crc kubenswrapper[4854]: E1125 09:54:37.258387 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" podUID="3879e5ff-7566-4cd8-bcac-e8c07a79f965" Nov 25 09:54:37 crc kubenswrapper[4854]: E1125 09:54:37.265929 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" podUID="0ef84c87-7162-45bb-9622-48b2b37e50bd" Nov 25 09:54:37 crc kubenswrapper[4854]: I1125 09:54:37.745312 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:37 crc kubenswrapper[4854]: I1125 09:54:37.745558 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:37 crc kubenswrapper[4854]: I1125 09:54:37.753494 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-webhook-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:37 crc kubenswrapper[4854]: I1125 09:54:37.774420 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21f043af-1d20-48fd-a8eb-40cdbee6ab8c-metrics-certs\") pod \"openstack-operator-controller-manager-848ff5c487-2kbtl\" (UID: \"21f043af-1d20-48fd-a8eb-40cdbee6ab8c\") " pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:37 crc kubenswrapper[4854]: I1125 09:54:37.802051 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-pkgng" Nov 25 09:54:37 crc kubenswrapper[4854]: I1125 09:54:37.810422 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:54:47 crc kubenswrapper[4854]: E1125 09:54:47.073886 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:70cce55bcf89468c5d468ca2fc317bfc3dc5f2bef1c502df9faca2eb1293ede7" Nov 25 09:54:47 crc kubenswrapper[4854]: E1125 09:54:47.074548 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:70cce55bcf89468c5d468ca2fc317bfc3dc5f2bef1c502df9faca2eb1293ede7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9b2tr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-86dc4d89c8-pvjbb_openstack-operators(df2830c6-0e67-4aea-b14d-101a5323617b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:54:48 crc kubenswrapper[4854]: E1125 09:54:48.008116 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b" Nov 25 09:54:48 crc kubenswrapper[4854]: E1125 09:54:48.008535 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2cn5d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-tcblk_openstack-operators(62c72ef0-d6f3-4b75-aa57-43d934de39e9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:54:48 crc kubenswrapper[4854]: E1125 09:54:48.589050 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13" Nov 25 09:54:48 crc kubenswrapper[4854]: E1125 09:54:48.589284 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 
500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xmctq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-fd75fd47d-wcjpf_openstack-operators(bcafa8f9-8abb-4015-8afc-6767b0485ad8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:54:58 crc kubenswrapper[4854]: E1125 09:54:58.330112 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991" Nov 25 09:54:58 crc kubenswrapper[4854]: E1125 09:54:58.330965 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-szvmv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-68b95954c9-n99lm_openstack-operators(5ed548fa-16b5-4733-b798-a0819bc8e77d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:02 crc kubenswrapper[4854]: E1125 09:55:02.787377 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7" Nov 25 09:55:02 crc kubenswrapper[4854]: E1125 09:55:02.788275 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jcvh2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-7bgzh_openstack-operators(e3e51685-6e25-4bcd-8194-4faf0947962a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:03 crc kubenswrapper[4854]: E1125 09:55:03.352180 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a" Nov 25 09:55:03 crc kubenswrapper[4854]: E1125 09:55:03.352391 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ltbx2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58bb8d67cc-4tvd5_openstack-operators(7cff163e-525b-4280-ab48-35eb6d6dd242): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:04 crc kubenswrapper[4854]: E1125 09:55:04.642150 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894" Nov 25 09:55:04 crc kubenswrapper[4854]: E1125 09:55:04.642540 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hkx99,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-d5cc86f4b-n9bk6_openstack-operators(77de3154-620e-407a-97e9-94d3dd90ced7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:05 crc kubenswrapper[4854]: E1125 09:55:05.652298 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d" Nov 25 09:55:05 crc kubenswrapper[4854]: E1125 09:55:05.652465 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zbx7f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-vddrp_openstack-operators(0e528244-b216-46fa-95a8-8b0faf6a50df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:07 crc kubenswrapper[4854]: E1125 09:55:07.245125 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0" Nov 25 09:55:07 crc kubenswrapper[4854]: E1125 09:55:07.245559 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-754vx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-rwp8j_openstack-operators(17936560-9d8d-4c29-b12a-451abdc2787a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:08 crc kubenswrapper[4854]: E1125 09:55:08.908044 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 25 09:55:08 crc kubenswrapper[4854]: E1125 09:55:08.908609 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9b2tr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-86dc4d89c8-pvjbb_openstack-operators(df2830c6-0e67-4aea-b14d-101a5323617b): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Nov 25 09:55:08 crc kubenswrapper[4854]: E1125 09:55:08.910282 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"]" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" 
podUID="df2830c6-0e67-4aea-b14d-101a5323617b" Nov 25 09:55:09 crc kubenswrapper[4854]: E1125 09:55:09.233284 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.65:5001/openstack-k8s-operators/telemetry-operator:e124eaeb2a77d0c6592b8877ac23b9b68c1e5cbc" Nov 25 09:55:09 crc kubenswrapper[4854]: E1125 09:55:09.233343 4854 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.65:5001/openstack-k8s-operators/telemetry-operator:e124eaeb2a77d0c6592b8877ac23b9b68c1e5cbc" Nov 25 09:55:09 crc kubenswrapper[4854]: E1125 09:55:09.233493 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.65:5001/openstack-k8s-operators/telemetry-operator:e124eaeb2a77d0c6592b8877ac23b9b68c1e5cbc,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzpgf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-67b89c8998-d98c4_openstack-operators(91d4e118-8133-4531-8e70-afb240453f11): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:11 crc kubenswrapper[4854]: E1125 09:55:11.088917 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f" Nov 25 
09:55:11 crc kubenswrapper[4854]: E1125 09:55:11.089339 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mllbf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-9pstc_openstack-operators(0e7943a0-c710-4440-a8be-932c12cfd4de): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:55:11 crc kubenswrapper[4854]: I1125 09:55:11.541178 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl"] Nov 25 09:55:12 crc kubenswrapper[4854]: I1125 09:55:12.553900 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" event={"ID":"21f043af-1d20-48fd-a8eb-40cdbee6ab8c","Type":"ContainerStarted","Data":"63bbf4509b6cbb48f405bad5a747b4bd56501640df997caef17386b382e3d99c"} Nov 25 09:55:13 crc kubenswrapper[4854]: I1125 09:55:13.580083 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" event={"ID":"e0afee56-5768-44f9-af4d-da496a95ae39","Type":"ContainerStarted","Data":"bde2e3c1774012b440f4a437c1cff3563101f827492e0607629025cfb80875a0"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.598252 
4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" event={"ID":"10550c56-b051-4d65-a13c-3854e40d2869","Type":"ContainerStarted","Data":"a4b6446b76dd5be208ee428aca10dd95f892223d97188911dccb6ae9afd0213d"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.607534 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" event={"ID":"5a9dc90e-3728-42c9-897c-7b28035196bd","Type":"ContainerStarted","Data":"abe1229f36f848b7d8a62e43b11e7f1bf1603eb35e68acb4081ec513071fae12"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.608983 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" event={"ID":"d51e017c-9c85-443a-a5b9-b8b8969bb019","Type":"ContainerStarted","Data":"c4184ea9ee3728c19993d48a896a391daaf14cf9f77020508de9e1cee3c9c54d"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.614656 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" event={"ID":"1da6b327-2319-4a92-9555-736d992a3348","Type":"ContainerStarted","Data":"bce58f5cebaf7a49eb8dd85c2200f8077334463043bfa2ac4e13edd781d7eac0"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.620054 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" event={"ID":"c0ebacaa-bc16-4876-87cb-011f523a59a4","Type":"ContainerStarted","Data":"0ac7b0bbeaf51e6a1d8c13420096a6a0118ac4f729f144f05b66721112380421"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.621885 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" event={"ID":"5c0186e4-d72a-4281-ab41-d012f2d4d775","Type":"ContainerStarted","Data":"33a05b568561eb479681547a912b678bff7f358a5626fe0d32ad696ec0415404"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.623245 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" event={"ID":"21f043af-1d20-48fd-a8eb-40cdbee6ab8c","Type":"ContainerStarted","Data":"5ec105c9dce24471037090751e7a86812f8c785eeb6d2960037b743b372815fb"} Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.624549 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" Nov 25 09:55:14 crc kubenswrapper[4854]: I1125 09:55:14.661744 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl" podStartSLOduration=41.661720339 podStartE2EDuration="41.661720339s" podCreationTimestamp="2025-11-25 09:54:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:55:14.659859509 +0000 UTC m=+1120.512852875" watchObservedRunningTime="2025-11-25 09:55:14.661720339 +0000 UTC m=+1120.514713715" Nov 25 09:55:20 crc kubenswrapper[4854]: I1125 09:55:20.680191 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" event={"ID":"df2830c6-0e67-4aea-b14d-101a5323617b","Type":"ContainerStarted","Data":"6a5be592fd7e8e74e536ae76e789309bf539f8ed5200fd587d198ded9f3f005b"} 
Nov 25 09:55:20 crc kubenswrapper[4854]: I1125 09:55:20.681332 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" event={"ID":"13c658f9-13c2-43d5-9b8a-d30484e5943f","Type":"ContainerStarted","Data":"d20db1a116618bc88bd898bb2c1fa820520551b7262f6b620f8f165211553824"} Nov 25 09:55:20 crc kubenswrapper[4854]: I1125 09:55:20.682462 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" event={"ID":"2b0d30d6-6825-4d29-b5bb-9ea86f790b6f","Type":"ContainerStarted","Data":"9bc62a94785133c0122629c37c22cfd0d72aab0e61e5a3ba5d7d001d6342b973"} Nov 25 09:55:20 crc kubenswrapper[4854]: I1125 09:55:20.686232 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" event={"ID":"0ef84c87-7162-45bb-9622-48b2b37e50bd","Type":"ContainerStarted","Data":"41026fd1fff878c7870c9223ae7b3901fc01678af6dd05daeeb93ee5d14eee32"} Nov 25 09:55:20 crc kubenswrapper[4854]: I1125 09:55:20.688344 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" event={"ID":"3879e5ff-7566-4cd8-bcac-e8c07a79f965","Type":"ContainerStarted","Data":"5c0aab9cbde2f9245df2c1da9c5e1ea83cb84a1754cb23b7d2a7c914b7b02da4"} Nov 25 09:55:20 crc kubenswrapper[4854]: I1125 09:55:20.717183 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-gwfmz" podStartSLOduration=10.744879535 podStartE2EDuration="47.717159897s" podCreationTimestamp="2025-11-25 09:54:33 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.808341644 +0000 UTC m=+1081.661335020" lastFinishedPulling="2025-11-25 09:55:12.780622006 +0000 UTC m=+1118.633615382" observedRunningTime="2025-11-25 09:55:20.706477004 +0000 UTC m=+1126.559470380" watchObservedRunningTime="2025-11-25 09:55:20.717159897 +0000 UTC m=+1126.570153273" Nov 25 09:55:21 crc kubenswrapper[4854]: E1125 09:55:21.647997 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" podUID="62c72ef0-d6f3-4b75-aa57-43d934de39e9" Nov 25 09:55:21 crc kubenswrapper[4854]: E1125 09:55:21.663978 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" podUID="bcafa8f9-8abb-4015-8afc-6767b0485ad8" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.704595 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" event={"ID":"5c0186e4-d72a-4281-ab41-d012f2d4d775","Type":"ContainerStarted","Data":"b2bac838830786b1dad037029d1b097449226907a3fcf0d83394a57a46cc3c9e"} Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.704811 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.707194 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.711562 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" event={"ID":"62c72ef0-d6f3-4b75-aa57-43d934de39e9","Type":"ContainerStarted","Data":"18ef89df0fd74bdc23581e39a57832c9cb6137dff12d45804aec3e295a7f3ba0"} Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.718481 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" event={"ID":"e0afee56-5768-44f9-af4d-da496a95ae39","Type":"ContainerStarted","Data":"85e6cf63b111d4cf5bff9bc63b5056844b957e2106f07cef9391099a320fef41"} Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.719022 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.720284 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" event={"ID":"bcafa8f9-8abb-4015-8afc-6767b0485ad8","Type":"ContainerStarted","Data":"9d37c92fd6352ba2f339cbd3e5f2d0ea22fa3007af6762cb666a0322d7d10afc"} Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.723339 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.732215 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" event={"ID":"d51e017c-9c85-443a-a5b9-b8b8969bb019","Type":"ContainerStarted","Data":"d43945a504b166f669eec7c745929d507d1dee97bf3a02c570d4c2fa2cbc2db0"} Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.733693 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" podStartSLOduration=3.05543999 podStartE2EDuration="49.733658142s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:34.528442533 +0000 UTC m=+1080.381435909" lastFinishedPulling="2025-11-25 09:55:21.206660685 +0000 UTC m=+1127.059654061" observedRunningTime="2025-11-25 09:55:21.729063936 +0000 UTC m=+1127.582057322" watchObservedRunningTime="2025-11-25 09:55:21.733658142 +0000 UTC m=+1127.586651518" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.733969 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.735028 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.737248 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" event={"ID":"c0ebacaa-bc16-4876-87cb-011f523a59a4","Type":"ContainerStarted","Data":"fc901af1668a9469640459bbe8d77f4b837d6c043eb25f579ccb3ce00bfac40e"} Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.738394 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" Nov 25 
09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.742400 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.817990 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-vfv6r" podStartSLOduration=2.365349239 podStartE2EDuration="49.817965815s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:33.78835467 +0000 UTC m=+1079.641348046" lastFinishedPulling="2025-11-25 09:55:21.240971246 +0000 UTC m=+1127.093964622" observedRunningTime="2025-11-25 09:55:21.803996902 +0000 UTC m=+1127.656990288" watchObservedRunningTime="2025-11-25 09:55:21.817965815 +0000 UTC m=+1127.670959181" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.865255 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" podStartSLOduration=3.12141331 podStartE2EDuration="49.865230822s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:34.493373341 +0000 UTC m=+1080.346366717" lastFinishedPulling="2025-11-25 09:55:21.237190853 +0000 UTC m=+1127.090184229" observedRunningTime="2025-11-25 09:55:21.855071363 +0000 UTC m=+1127.708064749" watchObservedRunningTime="2025-11-25 09:55:21.865230822 +0000 UTC m=+1127.718224198" Nov 25 09:55:21 crc kubenswrapper[4854]: I1125 09:55:21.888039 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-v88zs" podStartSLOduration=3.975039128 podStartE2EDuration="49.888018317s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.326840065 +0000 UTC m=+1081.179833441" lastFinishedPulling="2025-11-25 09:55:21.239819254 +0000 UTC m=+1127.092812630" observedRunningTime="2025-11-25 09:55:21.88231235 +0000 UTC m=+1127.735305726" watchObservedRunningTime="2025-11-25 09:55:21.888018317 +0000 UTC m=+1127.741011693" Nov 25 09:55:21 crc kubenswrapper[4854]: E1125 09:55:21.897625 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" podUID="e3e51685-6e25-4bcd-8194-4faf0947962a" Nov 25 09:55:22 crc kubenswrapper[4854]: E1125 09:55:22.084279 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" podUID="17936560-9d8d-4c29-b12a-451abdc2787a" Nov 25 09:55:22 crc kubenswrapper[4854]: E1125 09:55:22.372730 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" podUID="91d4e118-8133-4531-8e70-afb240453f11" Nov 25 09:55:22 crc kubenswrapper[4854]: E1125 09:55:22.661894 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = 
Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" podUID="0e7943a0-c710-4440-a8be-932c12cfd4de" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.745148 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" event={"ID":"5a9dc90e-3728-42c9-897c-7b28035196bd","Type":"ContainerStarted","Data":"5fb0125cec2b43ac901ddc1cae710ad5792370d667b63cb2ef4708502eac9239"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.745551 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.746291 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" event={"ID":"e3e51685-6e25-4bcd-8194-4faf0947962a","Type":"ContainerStarted","Data":"6079fdec67f4f02ee73d85854e784ad53b783f690ca50d638ea31b8cb972a66d"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.747197 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.749237 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" event={"ID":"0ef84c87-7162-45bb-9622-48b2b37e50bd","Type":"ContainerStarted","Data":"723b570c5b9ab9f9864935f3dd0462137c3771cc7c75a2c9c18af8e3e08b9683"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.749350 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.750380 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" event={"ID":"0e7943a0-c710-4440-a8be-932c12cfd4de","Type":"ContainerStarted","Data":"413c519ac00304368da2738f5b6f27a6dc10d5733d6985f2906b1f66203f2fa0"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.752837 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" event={"ID":"1da6b327-2319-4a92-9555-736d992a3348","Type":"ContainerStarted","Data":"856b12021e4c5f810bc8b1ac4ef8561f0317329d3c35dda14bbd956d5c34f2fb"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.753590 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.755949 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.756047 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" event={"ID":"2b0d30d6-6825-4d29-b5bb-9ea86f790b6f","Type":"ContainerStarted","Data":"83f3a24461eb95f757c189572902e8fa00dff73cff73db3c2d0f22b1cf3e385e"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.756595 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.758341 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" event={"ID":"91d4e118-8133-4531-8e70-afb240453f11","Type":"ContainerStarted","Data":"344b4e117091a82e3e6032defa051c0671b87db9af0a60ba5db28173025f0e4e"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.776980 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-bjjdt" podStartSLOduration=3.662886993 podStartE2EDuration="50.776957332s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:34.124467271 +0000 UTC m=+1079.977460637" lastFinishedPulling="2025-11-25 09:55:21.2385376 +0000 UTC m=+1127.091530976" observedRunningTime="2025-11-25 09:55:22.763141603 +0000 UTC m=+1128.616134999" watchObservedRunningTime="2025-11-25 09:55:22.776957332 +0000 UTC m=+1128.629950708" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.779268 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" event={"ID":"17936560-9d8d-4c29-b12a-451abdc2787a","Type":"ContainerStarted","Data":"22c1396b214d59cabc0e080d4b13444824878d7aeba24e5b77b2452231c89e1e"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.782351 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" event={"ID":"df2830c6-0e67-4aea-b14d-101a5323617b","Type":"ContainerStarted","Data":"3c462b1904d651a9695f25f6ff7867c555d57c75ddb6ce5720d17029ec656d48"} Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.791648 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" podStartSLOduration=3.612628365 podStartE2EDuration="50.791624395s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:34.06137771 +0000 UTC m=+1079.914371086" lastFinishedPulling="2025-11-25 09:55:21.24037374 +0000 UTC m=+1127.093367116" observedRunningTime="2025-11-25 09:55:22.786429283 +0000 UTC m=+1128.639422669" watchObservedRunningTime="2025-11-25 09:55:22.791624395 +0000 UTC m=+1128.644617771" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.816587 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc" podStartSLOduration=4.127035397 podStartE2EDuration="50.816568729s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.395807057 +0000 UTC m=+1081.248800433" lastFinishedPulling="2025-11-25 09:55:22.085340389 +0000 UTC m=+1127.938333765" observedRunningTime="2025-11-25 09:55:22.809944608 +0000 UTC m=+1128.662937974" watchObservedRunningTime="2025-11-25 09:55:22.816568729 +0000 UTC m=+1128.669562105" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.849017 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" podStartSLOduration=4.940393439 podStartE2EDuration="50.848994869s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.808335944 +0000 UTC m=+1081.661329320" 
lastFinishedPulling="2025-11-25 09:55:21.716937374 +0000 UTC m=+1127.569930750" observedRunningTime="2025-11-25 09:55:22.843609231 +0000 UTC m=+1128.696602607" watchObservedRunningTime="2025-11-25 09:55:22.848994869 +0000 UTC m=+1128.701988255" Nov 25 09:55:22 crc kubenswrapper[4854]: I1125 09:55:22.957985 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" podStartSLOduration=2.542484198 podStartE2EDuration="50.957968588s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:33.668445491 +0000 UTC m=+1079.521438867" lastFinishedPulling="2025-11-25 09:55:22.083929891 +0000 UTC m=+1127.936923257" observedRunningTime="2025-11-25 09:55:22.953944368 +0000 UTC m=+1128.806937744" watchObservedRunningTime="2025-11-25 09:55:22.957968588 +0000 UTC m=+1128.810961964" Nov 25 09:55:23 crc kubenswrapper[4854]: E1125 09:55:23.427362 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" podUID="5ed548fa-16b5-4733-b798-a0819bc8e77d" Nov 25 09:55:23 crc kubenswrapper[4854]: E1125 09:55:23.487910 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" podUID="0e528244-b216-46fa-95a8-8b0faf6a50df" Nov 25 09:55:23 crc kubenswrapper[4854]: E1125 09:55:23.487955 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" podUID="7cff163e-525b-4280-ab48-35eb6d6dd242" Nov 25 09:55:23 crc kubenswrapper[4854]: I1125 09:55:23.792836 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" event={"ID":"5ed548fa-16b5-4733-b798-a0819bc8e77d","Type":"ContainerStarted","Data":"250363b6b3607c73b83f050cb5b33988f5a18cb013ca41288629e3e94c6fcf51"} Nov 25 09:55:23 crc kubenswrapper[4854]: I1125 09:55:23.795781 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" event={"ID":"bcafa8f9-8abb-4015-8afc-6767b0485ad8","Type":"ContainerStarted","Data":"cf3862393b6540a50ddca38f8cc0d97e77c9ccd39b5b2dcafadaae06516d072a"} Nov 25 09:55:23 crc kubenswrapper[4854]: I1125 09:55:23.795914 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" Nov 25 09:55:23 crc kubenswrapper[4854]: I1125 09:55:23.797595 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" event={"ID":"7cff163e-525b-4280-ab48-35eb6d6dd242","Type":"ContainerStarted","Data":"11b5f5dba35618bb0c118de7c8ac25f1d29112985ecda98a7d1029f0794a241b"} Nov 25 09:55:23 crc kubenswrapper[4854]: I1125 09:55:23.799454 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" 
event={"ID":"0e528244-b216-46fa-95a8-8b0faf6a50df","Type":"ContainerStarted","Data":"f2ab4e3e88ad54e4c3c154da6f468d7e82f9046de92dbd2cf4dc06f27e8db1b0"} Nov 25 09:55:23 crc kubenswrapper[4854]: I1125 09:55:23.799802 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb" Nov 25 09:55:23 crc kubenswrapper[4854]: I1125 09:55:23.874081 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf" podStartSLOduration=4.299102538 podStartE2EDuration="51.874063479s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.288967527 +0000 UTC m=+1081.141960903" lastFinishedPulling="2025-11-25 09:55:22.863928468 +0000 UTC m=+1128.716921844" observedRunningTime="2025-11-25 09:55:23.850469842 +0000 UTC m=+1129.703463208" watchObservedRunningTime="2025-11-25 09:55:23.874063479 +0000 UTC m=+1129.727056855" Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.164924 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz" Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.812225 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" event={"ID":"91d4e118-8133-4531-8e70-afb240453f11","Type":"ContainerStarted","Data":"44df6150445c8cd6b2343d5f91764db5a851f54ab97d731372c13f9d65f00f10"} Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.812562 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.814262 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" event={"ID":"10550c56-b051-4d65-a13c-3854e40d2869","Type":"ContainerStarted","Data":"d279b201e5df8dd2e744d56e858c8a2deb6cee886ea69be02d47c10f7f22d797"} Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.814748 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.816435 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" event={"ID":"77de3154-620e-407a-97e9-94d3dd90ced7","Type":"ContainerStarted","Data":"f02217d106396f697275266edf189597f17c3cd4aba959321f6439c588578be6"} Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.818062 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.820909 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" event={"ID":"13c658f9-13c2-43d5-9b8a-d30484e5943f","Type":"ContainerStarted","Data":"407cb64d8beffce7ec22efe9901bd11a191970924a56623c2687d36aa75611ce"} Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.820956 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 
09:55:24.824472 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-9nqtc"
Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.825068 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-pvjbb"
Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.826086 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m"
Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.839133 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4" podStartSLOduration=4.900078324 podStartE2EDuration="52.839116893s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.709137183 +0000 UTC m=+1081.562130559" lastFinishedPulling="2025-11-25 09:55:23.648175752 +0000 UTC m=+1129.501169128" observedRunningTime="2025-11-25 09:55:24.829291304 +0000 UTC m=+1130.682284710" watchObservedRunningTime="2025-11-25 09:55:24.839116893 +0000 UTC m=+1130.692110259"
Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.864612 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-dlq5q" podStartSLOduration=4.3309334 podStartE2EDuration="52.864589922s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:34.531811165 +0000 UTC m=+1080.384804541" lastFinishedPulling="2025-11-25 09:55:23.065467687 +0000 UTC m=+1128.918461063" observedRunningTime="2025-11-25 09:55:24.845347705 +0000 UTC m=+1130.698341081" watchObservedRunningTime="2025-11-25 09:55:24.864589922 +0000 UTC m=+1130.717583298"
Nov 25 09:55:24 crc kubenswrapper[4854]: I1125 09:55:24.925933 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" podStartSLOduration=4.117263628 podStartE2EDuration="52.925910694s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:34.239826805 +0000 UTC m=+1080.092820181" lastFinishedPulling="2025-11-25 09:55:23.048473871 +0000 UTC m=+1128.901467247" observedRunningTime="2025-11-25 09:55:24.921990817 +0000 UTC m=+1130.774984193" watchObservedRunningTime="2025-11-25 09:55:24.925910694 +0000 UTC m=+1130.778904080"
Nov 25 09:55:24 crc kubenswrapper[4854]: E1125 09:55:24.961840 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" podUID="77de3154-620e-407a-97e9-94d3dd90ced7"
Nov 25 09:55:25 crc kubenswrapper[4854]: I1125 09:55:25.029296 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:55:25 crc kubenswrapper[4854]: I1125 09:55:25.029342 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:55:25 crc kubenswrapper[4854]: I1125 09:55:25.832393 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" event={"ID":"0e7943a0-c710-4440-a8be-932c12cfd4de","Type":"ContainerStarted","Data":"66e5689e0bde9c2e42a7ef86817acf789641ddee824cb1dedfb84b65074d08bc"}
Nov 25 09:55:25 crc kubenswrapper[4854]: I1125 09:55:25.833903 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" event={"ID":"e3e51685-6e25-4bcd-8194-4faf0947962a","Type":"ContainerStarted","Data":"18eed53a6b44112a27b0bca1a92aa97368a4569136c8a77d393b9e7741f2186c"}
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.845107 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" event={"ID":"17936560-9d8d-4c29-b12a-451abdc2787a","Type":"ContainerStarted","Data":"e0b6250556f51ec8b6f252d5b77e4c226b57bacce6a61d8919985517fb482a7c"}
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.845460 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.848112 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" event={"ID":"7cff163e-525b-4280-ab48-35eb6d6dd242","Type":"ContainerStarted","Data":"686baa6ddfff50369334947611a80e6cf7ddc85ba47396b9736d1594f6ee65f5"}
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.849663 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" event={"ID":"0e528244-b216-46fa-95a8-8b0faf6a50df","Type":"ContainerStarted","Data":"a8dad83638fd58090eba5285e3ea4433510b00957fefb766b8425278305b4812"}
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.849763 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.851439 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" event={"ID":"62c72ef0-d6f3-4b75-aa57-43d934de39e9","Type":"ContainerStarted","Data":"9ace68cb320f34e5bc646b0a539f439a3b83b90d9ccdd9de897bef87b2cb3cc0"}
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.851571 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.853401 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" event={"ID":"5ed548fa-16b5-4733-b798-a0819bc8e77d","Type":"ContainerStarted","Data":"ce93cb09f0eaa56012beea7fe906b30523736df6a9a87c4ce037a31782c010d4"}
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.853704 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.865103 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j" podStartSLOduration=5.893563547 podStartE2EDuration="54.86507906s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.609849819 +0000 UTC m=+1081.462843195" lastFinishedPulling="2025-11-25 09:55:24.581365332 +0000 UTC m=+1130.434358708" observedRunningTime="2025-11-25 09:55:26.860559107 +0000 UTC m=+1132.713552503" watchObservedRunningTime="2025-11-25 09:55:26.86507906 +0000 UTC m=+1132.718072436"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.883826 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc" podStartSLOduration=5.959073625 podStartE2EDuration="54.883795104s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.654123974 +0000 UTC m=+1081.507117360" lastFinishedPulling="2025-11-25 09:55:24.578845463 +0000 UTC m=+1130.431838839" observedRunningTime="2025-11-25 09:55:26.88219079 +0000 UTC m=+1132.735184166" watchObservedRunningTime="2025-11-25 09:55:26.883795104 +0000 UTC m=+1132.736788480"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.904795 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk" podStartSLOduration=5.652360741 podStartE2EDuration="54.90477414s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.326459265 +0000 UTC m=+1081.179452631" lastFinishedPulling="2025-11-25 09:55:24.578872634 +0000 UTC m=+1130.431866030" observedRunningTime="2025-11-25 09:55:26.903115224 +0000 UTC m=+1132.756108620" watchObservedRunningTime="2025-11-25 09:55:26.90477414 +0000 UTC m=+1132.757767516"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.925916 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh" podStartSLOduration=5.687757762 podStartE2EDuration="54.925893389s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.340906981 +0000 UTC m=+1081.193900357" lastFinishedPulling="2025-11-25 09:55:24.579042608 +0000 UTC m=+1130.432035984" observedRunningTime="2025-11-25 09:55:26.919280028 +0000 UTC m=+1132.772273424" watchObservedRunningTime="2025-11-25 09:55:26.925893389 +0000 UTC m=+1132.778886765"
Nov 25 09:55:26 crc kubenswrapper[4854]: I1125 09:55:26.939353 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp" podStartSLOduration=4.703561892 podStartE2EDuration="54.939335338s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.616396928 +0000 UTC m=+1081.469390304" lastFinishedPulling="2025-11-25 09:55:25.852170374 +0000 UTC m=+1131.705163750" observedRunningTime="2025-11-25 09:55:26.936904171 +0000 UTC m=+1132.789897567" watchObservedRunningTime="2025-11-25 09:55:26.939335338 +0000 UTC m=+1132.792328714"
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.816815 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-848ff5c487-2kbtl"
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.873725 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" event={"ID":"77de3154-620e-407a-97e9-94d3dd90ced7","Type":"ContainerStarted","Data":"f5b5d90b68784e7b915a241836a35a6af1590a2ee1c64a5bd51a74533f99441c"}
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.873814 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm"
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.874435 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5"
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.874504 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6"
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.899043 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm" podStartSLOduration=3.876528585 podStartE2EDuration="55.899024105s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:33.830150817 +0000 UTC m=+1079.683144193" lastFinishedPulling="2025-11-25 09:55:25.852646337 +0000 UTC m=+1131.705639713" observedRunningTime="2025-11-25 09:55:27.889427811 +0000 UTC m=+1133.742421207" watchObservedRunningTime="2025-11-25 09:55:27.899024105 +0000 UTC m=+1133.752017481"
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.907362 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6" podStartSLOduration=4.482747445 podStartE2EDuration="55.907340053s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.779939165 +0000 UTC m=+1081.632932541" lastFinishedPulling="2025-11-25 09:55:27.204531773 +0000 UTC m=+1133.057525149" observedRunningTime="2025-11-25 09:55:27.904931887 +0000 UTC m=+1133.757925273" watchObservedRunningTime="2025-11-25 09:55:27.907340053 +0000 UTC m=+1133.760333439"
Nov 25 09:55:27 crc kubenswrapper[4854]: I1125 09:55:27.926236 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5" podStartSLOduration=5.115976345 podStartE2EDuration="55.92621696s" podCreationTimestamp="2025-11-25 09:54:32 +0000 UTC" firstStartedPulling="2025-11-25 09:54:35.439334451 +0000 UTC m=+1081.292327827" lastFinishedPulling="2025-11-25 09:55:26.249575066 +0000 UTC m=+1132.102568442" observedRunningTime="2025-11-25 09:55:27.918991883 +0000 UTC m=+1133.771985259" watchObservedRunningTime="2025-11-25 09:55:27.92621696 +0000 UTC m=+1133.779210336"
Nov 25 09:55:32 crc kubenswrapper[4854]: I1125 09:55:32.685695 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-n99lm"
Nov 25 09:55:33 crc kubenswrapper[4854]: I1125 09:55:33.262973 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-4tvd5"
Nov 25 09:55:33 crc kubenswrapper[4854]: I1125 09:55:33.344860 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh"
Nov 25 09:55:33 crc kubenswrapper[4854]: I1125 09:55:33.347930 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-7bgzh"
Nov 25 09:55:33 crc kubenswrapper[4854]: I1125 09:55:33.423566 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-wcjpf"
Nov 25 09:55:33 crc kubenswrapper[4854]: I1125 09:55:33.597378 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tcblk"
Nov 25 09:55:34 crc kubenswrapper[4854]: I1125 09:55:34.006138 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-rwp8j"
Nov 25 09:55:34 crc kubenswrapper[4854]: I1125 09:55:34.091127 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-67b89c8998-d98c4"
Nov 25 09:55:34 crc kubenswrapper[4854]: I1125 09:55:34.122066 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-vddrp"
Nov 25 09:55:34 crc kubenswrapper[4854]: I1125 09:55:34.149140 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-9pstc"
Nov 25 09:55:34 crc kubenswrapper[4854]: I1125 09:55:34.550422 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-n9bk6"
Nov 25 09:55:40 crc kubenswrapper[4854]: I1125 09:55:40.197600 4854 scope.go:117] "RemoveContainer" containerID="07a087d2525fa07b3b466775c1277f94400ea5b7fe2fd8784f7be2647677db1f"
Nov 25 09:55:40 crc kubenswrapper[4854]: I1125 09:55:40.218712 4854 scope.go:117] "RemoveContainer" containerID="fd43b2c37a9bbef9173e21b59eacb27d90acf3550838442149182b604c27184c"
Nov 25 09:55:40 crc kubenswrapper[4854]: I1125 09:55:40.249043 4854 scope.go:117] "RemoveContainer" containerID="ad3b45e96b90ac8838c0384152ddf852841b1a5b007598b6ffe019aba881c4c7"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.152304 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-r8fm6"]
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.154623 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.158235 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.158275 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-2d4hd"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.161952 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.162051 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.166724 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7zq4\" (UniqueName: \"kubernetes.io/projected/1e8f46ac-8fb7-4161-85bb-25af767745b0-kube-api-access-v7zq4\") pod \"dnsmasq-dns-675f4bcbfc-r8fm6\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.167017 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8f46ac-8fb7-4161-85bb-25af767745b0-config\") pod \"dnsmasq-dns-675f4bcbfc-r8fm6\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.170894 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-r8fm6"]
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.221279 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qmvj4"]
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.222759 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.230936 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.246623 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qmvj4"]
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.269708 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7zq4\" (UniqueName: \"kubernetes.io/projected/1e8f46ac-8fb7-4161-85bb-25af767745b0-kube-api-access-v7zq4\") pod \"dnsmasq-dns-675f4bcbfc-r8fm6\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.269779 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-config\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.269812 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.269846 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8f46ac-8fb7-4161-85bb-25af767745b0-config\") pod \"dnsmasq-dns-675f4bcbfc-r8fm6\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.269894 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdvlc\" (UniqueName: \"kubernetes.io/projected/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-kube-api-access-pdvlc\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.271200 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8f46ac-8fb7-4161-85bb-25af767745b0-config\") pod \"dnsmasq-dns-675f4bcbfc-r8fm6\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.291475 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7zq4\" (UniqueName: \"kubernetes.io/projected/1e8f46ac-8fb7-4161-85bb-25af767745b0-kube-api-access-v7zq4\") pod \"dnsmasq-dns-675f4bcbfc-r8fm6\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.371383 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-config\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.371439 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.371487 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdvlc\" (UniqueName: \"kubernetes.io/projected/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-kube-api-access-pdvlc\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.372251 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-config\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.372383 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.393699 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdvlc\" (UniqueName: \"kubernetes.io/projected/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-kube-api-access-pdvlc\") pod \"dnsmasq-dns-78dd6ddcc-qmvj4\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.475113 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.541964 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4"
Nov 25 09:55:50 crc kubenswrapper[4854]: I1125 09:55:50.911483 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-r8fm6"]
Nov 25 09:55:50 crc kubenswrapper[4854]: W1125 09:55:50.912515 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e8f46ac_8fb7_4161_85bb_25af767745b0.slice/crio-3e85fefed062aa6fc528078e2bea4eb0759a6cf1fb0725bbeb639296c4a808a7 WatchSource:0}: Error finding container 3e85fefed062aa6fc528078e2bea4eb0759a6cf1fb0725bbeb639296c4a808a7: Status 404 returned error can't find the container with id 3e85fefed062aa6fc528078e2bea4eb0759a6cf1fb0725bbeb639296c4a808a7
Nov 25 09:55:51 crc kubenswrapper[4854]: W1125 09:55:51.054582 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod181f45e6_ad6a_45ea_ba5a_03ae9ed7fd79.slice/crio-427b2ba4c465e7317c98ca37e177f7adf6a4315ee5ec91eef0fcc5536bf3d55d WatchSource:0}: Error finding container 427b2ba4c465e7317c98ca37e177f7adf6a4315ee5ec91eef0fcc5536bf3d55d: Status 404 returned error can't find the container with id 427b2ba4c465e7317c98ca37e177f7adf6a4315ee5ec91eef0fcc5536bf3d55d
Nov 25 09:55:51 crc kubenswrapper[4854]: I1125 09:55:51.056148 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qmvj4"]
Nov 25 09:55:51 crc kubenswrapper[4854]: I1125 09:55:51.079396 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4" event={"ID":"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79","Type":"ContainerStarted","Data":"427b2ba4c465e7317c98ca37e177f7adf6a4315ee5ec91eef0fcc5536bf3d55d"}
Nov 25 09:55:51 crc kubenswrapper[4854]: I1125 09:55:51.080424 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6" event={"ID":"1e8f46ac-8fb7-4161-85bb-25af767745b0","Type":"ContainerStarted","Data":"3e85fefed062aa6fc528078e2bea4eb0759a6cf1fb0725bbeb639296c4a808a7"}
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.337632 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-r8fm6"]
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.366321 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l6qff"]
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.367792 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.391942 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l6qff"]
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.432111 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-config\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.432961 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.433051 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skpr5\" (UniqueName: \"kubernetes.io/projected/967a931c-cbe4-4085-b38b-60399485791d-kube-api-access-skpr5\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.534358 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-config\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.534405 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.534455 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skpr5\" (UniqueName: \"kubernetes.io/projected/967a931c-cbe4-4085-b38b-60399485791d-kube-api-access-skpr5\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.535712 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.536266 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-config\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.576791 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skpr5\" (UniqueName: \"kubernetes.io/projected/967a931c-cbe4-4085-b38b-60399485791d-kube-api-access-skpr5\") pod \"dnsmasq-dns-666b6646f7-l6qff\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.668828 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qmvj4"]
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.692211 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l6qff"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.732732 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-twfb7"]
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.734420 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.742450 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.742635 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-config\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.742738 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5v5r\" (UniqueName: \"kubernetes.io/projected/2cd76364-963f-4af7-83ef-cc73ab247e14-kube-api-access-f5v5r\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.778705 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-twfb7"]
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.845560 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-config\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.845639 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5v5r\" (UniqueName: \"kubernetes.io/projected/2cd76364-963f-4af7-83ef-cc73ab247e14-kube-api-access-f5v5r\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.845713 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.846627 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.847188 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-config\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:53 crc kubenswrapper[4854]: I1125 09:55:53.872694 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5v5r\" (UniqueName: \"kubernetes.io/projected/2cd76364-963f-4af7-83ef-cc73ab247e14-kube-api-access-f5v5r\") pod \"dnsmasq-dns-57d769cc4f-twfb7\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") " pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.132449 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.282071 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l6qff"]
Nov 25 09:55:54 crc kubenswrapper[4854]: W1125 09:55:54.282743 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod967a931c_cbe4_4085_b38b_60399485791d.slice/crio-57183b0c96a04758765db6db4b395493831df681b9b46d3bb244e6360dd00646 WatchSource:0}: Error finding container 57183b0c96a04758765db6db4b395493831df681b9b46d3bb244e6360dd00646: Status 404 returned error can't find the container with id 57183b0c96a04758765db6db4b395493831df681b9b46d3bb244e6360dd00646
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.493496 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.497422 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.504767 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xk4p9"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.505160 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.505350 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.506000 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.506136 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.506187 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.506222 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.516140 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.523496 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"]
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.525803 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.535352 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"]
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.540336 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.543086 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"]
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.585950 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"]
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.636589 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-twfb7"]
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.660366 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-config-data\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.660830 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.660983 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dc586641-37b8-4b9b-8479-a3c552bec71d-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661112 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661203 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661281 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661378 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661469 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-config-data\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661643 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661784 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-server-conf\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.661831 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgg4h\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-kube-api-access-bgg4h\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662205 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662266 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662337 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c24229dd-3c9c-47b6-8080-a1d51e0e6868-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662378 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6894f0be-f53f-401b-8707-4cc0cfd020dc-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662438 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662457 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662496 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662598 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662662 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mgt5\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-kube-api-access-5mgt5\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662746 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662766 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-config-data\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662780 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6894f0be-f53f-401b-8707-4cc0cfd020dc-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662860 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.662929 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663003 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663028 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663110 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663205 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c24229dd-3c9c-47b6-8080-a1d51e0e6868-pod-info\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663427 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj7rl\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-kube-api-access-tj7rl\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663595 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-server-conf\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663648 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dc586641-37b8-4b9b-8479-a3c552bec71d-pod-info\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.663684 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.764944 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dc586641-37b8-4b9b-8479-a3c552bec71d-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765001 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0"
Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765052 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1"
\"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765090 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765109 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765123 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-config-data\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765142 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765161 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-server-conf\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765183 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgg4h\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-kube-api-access-bgg4h\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765204 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765222 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765252 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c24229dd-3c9c-47b6-8080-a1d51e0e6868-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765278 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6894f0be-f53f-401b-8707-4cc0cfd020dc-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765308 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765328 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765344 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765373 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765390 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mgt5\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-kube-api-access-5mgt5\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765415 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765432 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6894f0be-f53f-401b-8707-4cc0cfd020dc-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765447 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-config-data\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765471 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: 
\"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765491 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765510 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765529 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765549 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765577 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c24229dd-3c9c-47b6-8080-a1d51e0e6868-pod-info\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765599 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj7rl\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-kube-api-access-tj7rl\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765622 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-server-conf\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765637 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dc586641-37b8-4b9b-8479-a3c552bec71d-pod-info\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765655 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765724 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-config-data\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765744 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.765869 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.766086 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.766338 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.766820 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-config-data\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.767400 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.768174 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-config-data\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.768548 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-config-data\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.768750 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.770261 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dc586641-37b8-4b9b-8479-a3c552bec71d-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.770438 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.770814 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.771067 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6894f0be-f53f-401b-8707-4cc0cfd020dc-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.771263 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.772008 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dc586641-37b8-4b9b-8479-a3c552bec71d-pod-info\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.772024 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.772572 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-server-conf\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.772782 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.772801 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 
crc kubenswrapper[4854]: I1125 09:55:54.773114 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.773532 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.774025 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-server-conf\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.774139 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.774554 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.774554 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.775026 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.775489 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c24229dd-3c9c-47b6-8080-a1d51e0e6868-pod-info\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.775563 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c24229dd-3c9c-47b6-8080-a1d51e0e6868-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.776587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: 
\"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.787888 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.788279 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6894f0be-f53f-401b-8707-4cc0cfd020dc-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.789391 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgg4h\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-kube-api-access-bgg4h\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.790843 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj7rl\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-kube-api-access-tj7rl\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.791639 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mgt5\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-kube-api-access-5mgt5\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.800687 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.808123 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.809087 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.836012 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.838411 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.840248 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.842980 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.842998 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.843060 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.843422 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-4l8k2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.843682 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.843878 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.843997 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.847148 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.854125 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.864590 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.967855 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.967911 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.967955 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968003 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968192 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968266 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968303 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/575fe5df-5a76-4633-9688-3997a708f3f4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968446 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx929\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-kube-api-access-fx929\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968488 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968545 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/575fe5df-5a76-4633-9688-3997a708f3f4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:54 crc kubenswrapper[4854]: I1125 09:55:54.968594 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.028537 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.028598 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.071508 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072083 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072189 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/575fe5df-5a76-4633-9688-3997a708f3f4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072306 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx929\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-kube-api-access-fx929\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072381 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072472 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/575fe5df-5a76-4633-9688-3997a708f3f4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072550 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072635 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072827 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.072915 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.073026 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.073160 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.073483 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.073752 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.075410 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.076213 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/575fe5df-5a76-4633-9688-3997a708f3f4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.078293 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.078448 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.082268 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.083357 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/575fe5df-5a76-4633-9688-3997a708f3f4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.083962 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.139935 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.144103 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx929\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-kube-api-access-fx929\") pod \"rabbitmq-cell1-server-0\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.161602 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" event={"ID":"967a931c-cbe4-4085-b38b-60399485791d","Type":"ContainerStarted","Data":"57183b0c96a04758765db6db4b395493831df681b9b46d3bb244e6360dd00646"} Nov 25 09:55:55 crc kubenswrapper[4854]: I1125 09:55:55.217263 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.218632 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.223813 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.236126 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.236504 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.240654 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-7xkvg" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.240927 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.241329 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.246303 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.314815 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-operator-scripts\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.314908 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-kolla-config\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.314937 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkvbd\" (UniqueName: \"kubernetes.io/projected/0984ac84-1833-4ddb-b21b-d526b64e9991-kube-api-access-jkvbd\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.314978 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.315020 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-config-data-default\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.315085 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0984ac84-1833-4ddb-b21b-d526b64e9991-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.315123 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0984ac84-1833-4ddb-b21b-d526b64e9991-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.315157 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0984ac84-1833-4ddb-b21b-d526b64e9991-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417187 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-kolla-config\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417273 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkvbd\" (UniqueName: \"kubernetes.io/projected/0984ac84-1833-4ddb-b21b-d526b64e9991-kube-api-access-jkvbd\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417342 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417477 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-config-data-default\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417627 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0984ac84-1833-4ddb-b21b-d526b64e9991-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417718 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0984ac84-1833-4ddb-b21b-d526b64e9991-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417787 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0984ac84-1833-4ddb-b21b-d526b64e9991-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.417934 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.418652 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-kolla-config\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.420946 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.422357 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0984ac84-1833-4ddb-b21b-d526b64e9991-config-data-generated\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.422573 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-config-data-default\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.423395 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0984ac84-1833-4ddb-b21b-d526b64e9991-operator-scripts\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.429340 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0984ac84-1833-4ddb-b21b-d526b64e9991-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.431343 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0984ac84-1833-4ddb-b21b-d526b64e9991-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.452810 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkvbd\" (UniqueName: \"kubernetes.io/projected/0984ac84-1833-4ddb-b21b-d526b64e9991-kube-api-access-jkvbd\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.466794 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"0984ac84-1833-4ddb-b21b-d526b64e9991\") " pod="openstack/openstack-galera-0" Nov 25 09:55:56 crc kubenswrapper[4854]: I1125 09:55:56.594242 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: W1125 09:55:57.631923 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2cd76364_963f_4af7_83ef_cc73ab247e14.slice/crio-59fb80ff18aeaee7868e2bdce5b76aac5ad3e32c43e51fcb566af29d3cec3e32 WatchSource:0}: Error finding container 59fb80ff18aeaee7868e2bdce5b76aac5ad3e32c43e51fcb566af29d3cec3e32: Status 404 returned error can't find the container with id 59fb80ff18aeaee7868e2bdce5b76aac5ad3e32c43e51fcb566af29d3cec3e32 Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.656833 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.659047 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.668723 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.668954 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.674293 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.675089 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-qfnmr" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.675329 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.739435 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827c4948-d1bc-4c63-838b-57f267bdcf93-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.739575 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.739602 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/827c4948-d1bc-4c63-838b-57f267bdcf93-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.739621 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.739639 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.739982 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np4dk\" (UniqueName: \"kubernetes.io/projected/827c4948-d1bc-4c63-838b-57f267bdcf93-kube-api-access-np4dk\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.740105 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.740135 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/827c4948-d1bc-4c63-838b-57f267bdcf93-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842325 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np4dk\" (UniqueName: \"kubernetes.io/projected/827c4948-d1bc-4c63-838b-57f267bdcf93-kube-api-access-np4dk\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842423 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842449 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/827c4948-d1bc-4c63-838b-57f267bdcf93-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842497 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827c4948-d1bc-4c63-838b-57f267bdcf93-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842564 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842585 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/827c4948-d1bc-4c63-838b-57f267bdcf93-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842611 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.842637 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.843900 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.844005 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.844223 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.846509 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/827c4948-d1bc-4c63-838b-57f267bdcf93-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.848378 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/827c4948-d1bc-4c63-838b-57f267bdcf93-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.848976 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/827c4948-d1bc-4c63-838b-57f267bdcf93-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.862237 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827c4948-d1bc-4c63-838b-57f267bdcf93-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.875908 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np4dk\" (UniqueName: \"kubernetes.io/projected/827c4948-d1bc-4c63-838b-57f267bdcf93-kube-api-access-np4dk\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.877685 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"827c4948-d1bc-4c63-838b-57f267bdcf93\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.889284 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.890496 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.893789 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.900876 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-ssg2t" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.901075 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.920573 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.945179 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm72q\" (UniqueName: \"kubernetes.io/projected/2f639da7-7576-4274-94c5-4304b6af9b4d-kube-api-access-hm72q\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.945337 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f639da7-7576-4274-94c5-4304b6af9b4d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.945385 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f639da7-7576-4274-94c5-4304b6af9b4d-config-data\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.945471 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f639da7-7576-4274-94c5-4304b6af9b4d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:57 crc kubenswrapper[4854]: I1125 09:55:57.945507 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/2f639da7-7576-4274-94c5-4304b6af9b4d-kolla-config\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.033823 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.051826 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm72q\" (UniqueName: \"kubernetes.io/projected/2f639da7-7576-4274-94c5-4304b6af9b4d-kube-api-access-hm72q\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.052227 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f639da7-7576-4274-94c5-4304b6af9b4d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.052269 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f639da7-7576-4274-94c5-4304b6af9b4d-config-data\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.052348 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f639da7-7576-4274-94c5-4304b6af9b4d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.052385 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2f639da7-7576-4274-94c5-4304b6af9b4d-kolla-config\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.053449 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f639da7-7576-4274-94c5-4304b6af9b4d-config-data\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.053788 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2f639da7-7576-4274-94c5-4304b6af9b4d-kolla-config\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.055652 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f639da7-7576-4274-94c5-4304b6af9b4d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.059185 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f639da7-7576-4274-94c5-4304b6af9b4d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 
crc kubenswrapper[4854]: I1125 09:55:58.075607 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm72q\" (UniqueName: \"kubernetes.io/projected/2f639da7-7576-4274-94c5-4304b6af9b4d-kube-api-access-hm72q\") pod \"memcached-0\" (UID: \"2f639da7-7576-4274-94c5-4304b6af9b4d\") " pod="openstack/memcached-0" Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.204515 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" event={"ID":"2cd76364-963f-4af7-83ef-cc73ab247e14","Type":"ContainerStarted","Data":"59fb80ff18aeaee7868e2bdce5b76aac5ad3e32c43e51fcb566af29d3cec3e32"} Nov 25 09:55:58 crc kubenswrapper[4854]: I1125 09:55:58.258007 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 09:55:59 crc kubenswrapper[4854]: I1125 09:55:59.891442 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.092928 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.094324 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.097120 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-gwcw9" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.111498 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.205338 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5t8s\" (UniqueName: \"kubernetes.io/projected/a52007f7-ea68-422e-90d3-be11aa0184d5-kube-api-access-w5t8s\") pod \"kube-state-metrics-0\" (UID: \"a52007f7-ea68-422e-90d3-be11aa0184d5\") " pod="openstack/kube-state-metrics-0" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.315817 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5t8s\" (UniqueName: \"kubernetes.io/projected/a52007f7-ea68-422e-90d3-be11aa0184d5-kube-api-access-w5t8s\") pod \"kube-state-metrics-0\" (UID: \"a52007f7-ea68-422e-90d3-be11aa0184d5\") " pod="openstack/kube-state-metrics-0" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.349847 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5t8s\" (UniqueName: \"kubernetes.io/projected/a52007f7-ea68-422e-90d3-be11aa0184d5-kube-api-access-w5t8s\") pod \"kube-state-metrics-0\" (UID: \"a52007f7-ea68-422e-90d3-be11aa0184d5\") " pod="openstack/kube-state-metrics-0" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.416913 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.938571 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv"] Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.939865 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.946068 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-xk59t" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.946263 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Nov 25 09:56:00 crc kubenswrapper[4854]: I1125 09:56:00.962830 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv"] Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.048833 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5aeba85-24cb-48ef-9050-93a40b4d67f0-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-c6npv\" (UID: \"a5aeba85-24cb-48ef-9050-93a40b4d67f0\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.049053 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp57z\" (UniqueName: \"kubernetes.io/projected/a5aeba85-24cb-48ef-9050-93a40b4d67f0-kube-api-access-jp57z\") pod \"observability-ui-dashboards-7d5fb4cbfb-c6npv\" (UID: \"a5aeba85-24cb-48ef-9050-93a40b4d67f0\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.151295 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp57z\" (UniqueName: \"kubernetes.io/projected/a5aeba85-24cb-48ef-9050-93a40b4d67f0-kube-api-access-jp57z\") pod \"observability-ui-dashboards-7d5fb4cbfb-c6npv\" (UID: \"a5aeba85-24cb-48ef-9050-93a40b4d67f0\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.151374 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5aeba85-24cb-48ef-9050-93a40b4d67f0-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-c6npv\" (UID: \"a5aeba85-24cb-48ef-9050-93a40b4d67f0\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: E1125 09:56:01.151621 4854 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Nov 25 09:56:01 crc kubenswrapper[4854]: E1125 09:56:01.151691 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a5aeba85-24cb-48ef-9050-93a40b4d67f0-serving-cert podName:a5aeba85-24cb-48ef-9050-93a40b4d67f0 nodeName:}" failed. No retries permitted until 2025-11-25 09:56:01.651655406 +0000 UTC m=+1167.504648782 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/a5aeba85-24cb-48ef-9050-93a40b4d67f0-serving-cert") pod "observability-ui-dashboards-7d5fb4cbfb-c6npv" (UID: "a5aeba85-24cb-48ef-9050-93a40b4d67f0") : secret "observability-ui-dashboards" not found Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.198192 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp57z\" (UniqueName: \"kubernetes.io/projected/a5aeba85-24cb-48ef-9050-93a40b4d67f0-kube-api-access-jp57z\") pod \"observability-ui-dashboards-7d5fb4cbfb-c6npv\" (UID: \"a5aeba85-24cb-48ef-9050-93a40b4d67f0\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.287611 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7c6c5466d9-g4cjl"] Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.289016 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.338418 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7c6c5466d9-g4cjl"] Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.460797 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-config\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.460854 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-service-ca\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.460881 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-serving-cert\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.461006 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6p52\" (UniqueName: \"kubernetes.io/projected/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-kube-api-access-b6p52\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.461118 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-oauth-config\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.461150 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-trusted-ca-bundle\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.461193 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-oauth-serving-cert\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.470128 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.489720 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.498752 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.499032 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.506113 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.521762 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.537792 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.540372 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.542564 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-t6crr" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.568536 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flk2x\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-kube-api-access-flk2x\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.569010 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6p52\" (UniqueName: \"kubernetes.io/projected/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-kube-api-access-b6p52\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.569245 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/93c535b4-23bb-4c71-8ddc-1304ca205e55-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.569351 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.569498 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.569578 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/93c535b4-23bb-4c71-8ddc-1304ca205e55-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.569654 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571445 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-oauth-config\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571518 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-trusted-ca-bundle\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571609 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-oauth-serving-cert\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571718 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-config\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571764 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571801 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571841 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-service-ca\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.571874 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-serving-cert\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.572786 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-oauth-serving-cert\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.572950 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-trusted-ca-bundle\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.574507 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-service-ca\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.575311 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-config\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.606744 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-serving-cert\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.621603 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6p52\" (UniqueName: \"kubernetes.io/projected/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-kube-api-access-b6p52\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " 
pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.624855 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3-console-oauth-config\") pod \"console-7c6c5466d9-g4cjl\" (UID: \"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3\") " pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.630982 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.679865 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5aeba85-24cb-48ef-9050-93a40b4d67f0-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-c6npv\" (UID: \"a5aeba85-24cb-48ef-9050-93a40b4d67f0\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.679942 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/93c535b4-23bb-4c71-8ddc-1304ca205e55-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.679995 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.680089 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.680315 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/93c535b4-23bb-4c71-8ddc-1304ca205e55-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.680343 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.680431 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-config\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.680452 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.680550 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flk2x\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-kube-api-access-flk2x\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.687889 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/93c535b4-23bb-4c71-8ddc-1304ca205e55-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.688202 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5aeba85-24cb-48ef-9050-93a40b4d67f0-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-c6npv\" (UID: \"a5aeba85-24cb-48ef-9050-93a40b4d67f0\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.690812 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.691762 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/93c535b4-23bb-4c71-8ddc-1304ca205e55-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.693081 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-config\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.694090 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.696129 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.698357 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice 
STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.698383 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8135b1691aea57bacf9aa6f6ce849194fda4d61ab30e82c5a91a76373bce7e14/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.711223 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flk2x\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-kube-api-access-flk2x\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.753331 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.870234 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" Nov 25 09:56:01 crc kubenswrapper[4854]: I1125 09:56:01.915385 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.942556 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-7dl26"] Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.944305 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-7dl26" Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.947113 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.947683 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-s8xpt" Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.952236 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.954180 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-2t2j4"] Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.975537 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-7dl26"] Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.975640 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:02 crc kubenswrapper[4854]: I1125 09:56:02.978708 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2t2j4"] Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.033860 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwjbh\" (UniqueName: \"kubernetes.io/projected/04573f28-a6e2-46ca-8a02-a2265c5d68e9-kube-api-access-xwjbh\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.033913 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-log\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.033969 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-run\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.033993 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/04573f28-a6e2-46ca-8a02-a2265c5d68e9-scripts\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034073 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-run\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034096 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-lib\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034148 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-log-ovn\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034265 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/04573f28-a6e2-46ca-8a02-a2265c5d68e9-ovn-controller-tls-certs\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034293 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/04573f28-a6e2-46ca-8a02-a2265c5d68e9-combined-ca-bundle\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034324 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-etc-ovs\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034344 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpw28\" (UniqueName: \"kubernetes.io/projected/f1553410-0e8c-4a68-89ed-67f3eeef891d-kube-api-access-mpw28\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034376 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-run-ovn\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.034416 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f1553410-0e8c-4a68-89ed-67f3eeef891d-scripts\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136409 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-lib\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136461 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-log-ovn\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136492 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/04573f28-a6e2-46ca-8a02-a2265c5d68e9-ovn-controller-tls-certs\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136511 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04573f28-a6e2-46ca-8a02-a2265c5d68e9-combined-ca-bundle\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136536 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-etc-ovs\") pod \"ovn-controller-ovs-2t2j4\" (UID: 
\"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136558 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpw28\" (UniqueName: \"kubernetes.io/projected/f1553410-0e8c-4a68-89ed-67f3eeef891d-kube-api-access-mpw28\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136589 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-run-ovn\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136620 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f1553410-0e8c-4a68-89ed-67f3eeef891d-scripts\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136664 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwjbh\" (UniqueName: \"kubernetes.io/projected/04573f28-a6e2-46ca-8a02-a2265c5d68e9-kube-api-access-xwjbh\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136701 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-log\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136733 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-run\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136754 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/04573f28-a6e2-46ca-8a02-a2265c5d68e9-scripts\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.136823 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-run\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.137063 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-etc-ovs\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.137199 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-log-ovn\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.137250 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-run\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.137592 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-lib\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.137942 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f1553410-0e8c-4a68-89ed-67f3eeef891d-var-log\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.137965 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-run\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.138042 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/04573f28-a6e2-46ca-8a02-a2265c5d68e9-var-run-ovn\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.140002 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/04573f28-a6e2-46ca-8a02-a2265c5d68e9-scripts\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.140776 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f1553410-0e8c-4a68-89ed-67f3eeef891d-scripts\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.142771 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/04573f28-a6e2-46ca-8a02-a2265c5d68e9-ovn-controller-tls-certs\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.143212 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04573f28-a6e2-46ca-8a02-a2265c5d68e9-combined-ca-bundle\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.153224 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwjbh\" (UniqueName: \"kubernetes.io/projected/04573f28-a6e2-46ca-8a02-a2265c5d68e9-kube-api-access-xwjbh\") pod \"ovn-controller-7dl26\" (UID: \"04573f28-a6e2-46ca-8a02-a2265c5d68e9\") " pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.159156 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpw28\" (UniqueName: \"kubernetes.io/projected/f1553410-0e8c-4a68-89ed-67f3eeef891d-kube-api-access-mpw28\") pod \"ovn-controller-ovs-2t2j4\" (UID: \"f1553410-0e8c-4a68-89ed-67f3eeef891d\") " pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.311908 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-7dl26" Nov 25 09:56:03 crc kubenswrapper[4854]: I1125 09:56:03.317798 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.107195 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.109489 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.112213 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.112522 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.112783 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.112955 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.114363 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-h5jtw" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.162586 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164153 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164226 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164322 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e868f46b-cfbf-4642-ad85-c32884fca542-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 
09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164356 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e868f46b-cfbf-4642-ad85-c32884fca542-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164546 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn6bf\" (UniqueName: \"kubernetes.io/projected/e868f46b-cfbf-4642-ad85-c32884fca542-kube-api-access-gn6bf\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164623 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164643 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e868f46b-cfbf-4642-ad85-c32884fca542-config\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.164717 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.265973 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266053 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e868f46b-cfbf-4642-ad85-c32884fca542-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266076 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e868f46b-cfbf-4642-ad85-c32884fca542-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266153 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn6bf\" (UniqueName: \"kubernetes.io/projected/e868f46b-cfbf-4642-ad85-c32884fca542-kube-api-access-gn6bf\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266213 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266244 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e868f46b-cfbf-4642-ad85-c32884fca542-config\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266287 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266363 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.266498 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.267075 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e868f46b-cfbf-4642-ad85-c32884fca542-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.267611 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e868f46b-cfbf-4642-ad85-c32884fca542-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.267980 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e868f46b-cfbf-4642-ad85-c32884fca542-config\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.270829 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.287419 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.287965 4854 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e868f46b-cfbf-4642-ad85-c32884fca542-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.290532 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn6bf\" (UniqueName: \"kubernetes.io/projected/e868f46b-cfbf-4642-ad85-c32884fca542-kube-api-access-gn6bf\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.293243 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e868f46b-cfbf-4642-ad85-c32884fca542\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:04 crc kubenswrapper[4854]: I1125 09:56:04.443453 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:05 crc kubenswrapper[4854]: I1125 09:56:05.009315 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:56:05 crc kubenswrapper[4854]: I1125 09:56:05.301844 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0984ac84-1833-4ddb-b21b-d526b64e9991","Type":"ContainerStarted","Data":"e8924c44c5135c35529b20587826b42df7ee4c5d7e234fc3d32882ae02779595"} Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.527263 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.530490 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.533813 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.533894 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.533948 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-hx8ql" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.535197 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.554980 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639217 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639277 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1ce61789-25f3-44d3-813d-d51d10d068f1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639343 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ce61789-25f3-44d3-813d-d51d10d068f1-config\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639387 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwc46\" (UniqueName: \"kubernetes.io/projected/1ce61789-25f3-44d3-813d-d51d10d068f1-kube-api-access-nwc46\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639414 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639437 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639480 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: 
\"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.639510 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ce61789-25f3-44d3-813d-d51d10d068f1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741552 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741609 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1ce61789-25f3-44d3-813d-d51d10d068f1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741661 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ce61789-25f3-44d3-813d-d51d10d068f1-config\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741719 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwc46\" (UniqueName: \"kubernetes.io/projected/1ce61789-25f3-44d3-813d-d51d10d068f1-kube-api-access-nwc46\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741747 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741763 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741800 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.741825 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ce61789-25f3-44d3-813d-d51d10d068f1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.742530 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/1ce61789-25f3-44d3-813d-d51d10d068f1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.742787 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ce61789-25f3-44d3-813d-d51d10d068f1-config\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.742913 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ce61789-25f3-44d3-813d-d51d10d068f1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.743015 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.751263 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.751332 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.751716 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ce61789-25f3-44d3-813d-d51d10d068f1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.764491 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwc46\" (UniqueName: \"kubernetes.io/projected/1ce61789-25f3-44d3-813d-d51d10d068f1-kube-api-access-nwc46\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.788786 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1ce61789-25f3-44d3-813d-d51d10d068f1\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:07 crc kubenswrapper[4854]: I1125 09:56:07.850416 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:09 crc kubenswrapper[4854]: W1125 09:56:09.337766 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod575fe5df_5a76_4633_9688_3997a708f3f4.slice/crio-4b4e527f168e77fa66753edca299b2df7f7dbcee7e5edb7d180baa2e35bb87be WatchSource:0}: Error finding container 4b4e527f168e77fa66753edca299b2df7f7dbcee7e5edb7d180baa2e35bb87be: Status 404 returned error can't find the container with id 4b4e527f168e77fa66753edca299b2df7f7dbcee7e5edb7d180baa2e35bb87be Nov 25 09:56:09 crc kubenswrapper[4854]: I1125 09:56:09.860628 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Nov 25 09:56:09 crc kubenswrapper[4854]: I1125 09:56:09.874044 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:56:09 crc kubenswrapper[4854]: I1125 09:56:09.884399 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Nov 25 09:56:10 crc kubenswrapper[4854]: I1125 09:56:10.089152 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv"] Nov 25 09:56:10 crc kubenswrapper[4854]: I1125 09:56:10.355603 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"575fe5df-5a76-4633-9688-3997a708f3f4","Type":"ContainerStarted","Data":"4b4e527f168e77fa66753edca299b2df7f7dbcee7e5edb7d180baa2e35bb87be"} Nov 25 09:56:10 crc kubenswrapper[4854]: W1125 09:56:10.577844 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc586641_37b8_4b9b_8479_a3c552bec71d.slice/crio-2c5f4b5c111a680106b37dde0493ba1d7eb69ffc35b6bdb885b1ccd993890d4b WatchSource:0}: Error finding container 2c5f4b5c111a680106b37dde0493ba1d7eb69ffc35b6bdb885b1ccd993890d4b: Status 404 returned error can't find the container with id 2c5f4b5c111a680106b37dde0493ba1d7eb69ffc35b6bdb885b1ccd993890d4b Nov 25 09:56:10 crc kubenswrapper[4854]: E1125 09:56:10.740936 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 09:56:10 crc kubenswrapper[4854]: E1125 09:56:10.741306 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pdvlc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-qmvj4_openstack(181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:56:10 crc kubenswrapper[4854]: E1125 09:56:10.742428 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4" podUID="181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79" Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.005248 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 09:56:11 crc kubenswrapper[4854]: E1125 09:56:11.345872 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 09:56:11 crc kubenswrapper[4854]: E1125 09:56:11.347740 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v7zq4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-r8fm6_openstack(1e8f46ac-8fb7-4161-85bb-25af767745b0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:56:11 crc kubenswrapper[4854]: E1125 09:56:11.349088 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6" podUID="1e8f46ac-8fb7-4161-85bb-25af767745b0" Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.366958 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6894f0be-f53f-401b-8707-4cc0cfd020dc","Type":"ContainerStarted","Data":"0670d50acf9d21a887e1044760c2332b9d3892cf49dd461fc14f468eba91a844"} Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.369058 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"dc586641-37b8-4b9b-8479-a3c552bec71d","Type":"ContainerStarted","Data":"2c5f4b5c111a680106b37dde0493ba1d7eb69ffc35b6bdb885b1ccd993890d4b"} Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.371126 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"c24229dd-3c9c-47b6-8080-a1d51e0e6868","Type":"ContainerStarted","Data":"8cee4606fda48fe9bc42eeeb24883c30e5d6d60a1f564fc6a2aece994a2bd598"} Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.373112 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2f639da7-7576-4274-94c5-4304b6af9b4d","Type":"ContainerStarted","Data":"25b8e90cb8fb16ab67bfc456c67c10be21a85addb82cf82e21c88814b40c3c7c"} Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.374369 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" 
event={"ID":"a5aeba85-24cb-48ef-9050-93a40b4d67f0","Type":"ContainerStarted","Data":"a9d7a16cdb3f564354eb806d571b7170756f84c4982e6deb05458b4629faafe6"} Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.472940 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7c6c5466d9-g4cjl"] Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.487131 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.494719 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:56:11 crc kubenswrapper[4854]: W1125 09:56:11.503809 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod827c4948_d1bc_4c63_838b_57f267bdcf93.slice/crio-da8058fc38ced063af6b64ca395a966cb96234e402cd822475d9437ad1ee2959 WatchSource:0}: Error finding container da8058fc38ced063af6b64ca395a966cb96234e402cd822475d9437ad1ee2959: Status 404 returned error can't find the container with id da8058fc38ced063af6b64ca395a966cb96234e402cd822475d9437ad1ee2959 Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.952487 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-7dl26"] Nov 25 09:56:11 crc kubenswrapper[4854]: I1125 09:56:11.963142 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:56:11 crc kubenswrapper[4854]: W1125 09:56:11.970605 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda52007f7_ea68_422e_90d3_be11aa0184d5.slice/crio-3d7a10d837a3a4f4751178cc7e6b621048bc97c7bbc1d3068336eec6f2ba8cdb WatchSource:0}: Error finding container 3d7a10d837a3a4f4751178cc7e6b621048bc97c7bbc1d3068336eec6f2ba8cdb: Status 404 returned error can't find the container with id 3d7a10d837a3a4f4751178cc7e6b621048bc97c7bbc1d3068336eec6f2ba8cdb Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.010353 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.018835 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.171108 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-config\") pod \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.171559 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-dns-svc\") pod \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.171641 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7zq4\" (UniqueName: \"kubernetes.io/projected/1e8f46ac-8fb7-4161-85bb-25af767745b0-kube-api-access-v7zq4\") pod \"1e8f46ac-8fb7-4161-85bb-25af767745b0\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.171782 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdvlc\" (UniqueName: \"kubernetes.io/projected/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-kube-api-access-pdvlc\") pod \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\" (UID: \"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79\") " Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.171863 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8f46ac-8fb7-4161-85bb-25af767745b0-config\") pod \"1e8f46ac-8fb7-4161-85bb-25af767745b0\" (UID: \"1e8f46ac-8fb7-4161-85bb-25af767745b0\") " Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.172088 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-config" (OuterVolumeSpecName: "config") pod "181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79" (UID: "181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.172166 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79" (UID: "181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.173543 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.173574 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.179001 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e8f46ac-8fb7-4161-85bb-25af767745b0-config" (OuterVolumeSpecName: "config") pod "1e8f46ac-8fb7-4161-85bb-25af767745b0" (UID: "1e8f46ac-8fb7-4161-85bb-25af767745b0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.179136 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-kube-api-access-pdvlc" (OuterVolumeSpecName: "kube-api-access-pdvlc") pod "181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79" (UID: "181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79"). InnerVolumeSpecName "kube-api-access-pdvlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.179261 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e8f46ac-8fb7-4161-85bb-25af767745b0-kube-api-access-v7zq4" (OuterVolumeSpecName: "kube-api-access-v7zq4") pod "1e8f46ac-8fb7-4161-85bb-25af767745b0" (UID: "1e8f46ac-8fb7-4161-85bb-25af767745b0"). InnerVolumeSpecName "kube-api-access-v7zq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.275299 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7zq4\" (UniqueName: \"kubernetes.io/projected/1e8f46ac-8fb7-4161-85bb-25af767745b0-kube-api-access-v7zq4\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.276129 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdvlc\" (UniqueName: \"kubernetes.io/projected/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79-kube-api-access-pdvlc\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.276147 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8f46ac-8fb7-4161-85bb-25af767745b0-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.387120 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"827c4948-d1bc-4c63-838b-57f267bdcf93","Type":"ContainerStarted","Data":"da8058fc38ced063af6b64ca395a966cb96234e402cd822475d9437ad1ee2959"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.389020 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a52007f7-ea68-422e-90d3-be11aa0184d5","Type":"ContainerStarted","Data":"3d7a10d837a3a4f4751178cc7e6b621048bc97c7bbc1d3068336eec6f2ba8cdb"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.390764 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26" event={"ID":"04573f28-a6e2-46ca-8a02-a2265c5d68e9","Type":"ContainerStarted","Data":"32292238550fe707ebd40899805ddb3f35756a074b65cc4062f2f3351eda4177"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.392686 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerStarted","Data":"fd04f3d1c9e26c07b6ea0f597a2b329dcacad50e683e5ce007eedbc90d7979fe"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.394471 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6" event={"ID":"1e8f46ac-8fb7-4161-85bb-25af767745b0","Type":"ContainerDied","Data":"3e85fefed062aa6fc528078e2bea4eb0759a6cf1fb0725bbeb639296c4a808a7"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.394483 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-r8fm6" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.396082 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7c6c5466d9-g4cjl" event={"ID":"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3","Type":"ContainerStarted","Data":"0802c02eb54d3d771f71546b3f8715fd766e00590c84212dc5583bb7911f3ce4"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.396134 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7c6c5466d9-g4cjl" event={"ID":"f9ff3524-0ae1-4d54-b74f-bc8bc06e40c3","Type":"ContainerStarted","Data":"d7a76ec78a879717d29be4ef65d78c5e7cda86cb2a71aca1fc7ea8403993b365"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.398654 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4" event={"ID":"181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79","Type":"ContainerDied","Data":"427b2ba4c465e7317c98ca37e177f7adf6a4315ee5ec91eef0fcc5536bf3d55d"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.398712 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-qmvj4" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.411980 4854 generic.go:334] "Generic (PLEG): container finished" podID="967a931c-cbe4-4085-b38b-60399485791d" containerID="2bad04825ff422fba7e27adfcfde3a3f15ea7f17719c61e4b194e5b6c44d2e2e" exitCode=0 Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.412100 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" event={"ID":"967a931c-cbe4-4085-b38b-60399485791d","Type":"ContainerDied","Data":"2bad04825ff422fba7e27adfcfde3a3f15ea7f17719c61e4b194e5b6c44d2e2e"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.436147 4854 generic.go:334] "Generic (PLEG): container finished" podID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerID="f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575" exitCode=0 Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.436202 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" event={"ID":"2cd76364-963f-4af7-83ef-cc73ab247e14","Type":"ContainerDied","Data":"f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575"} Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.464593 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7c6c5466d9-g4cjl" podStartSLOduration=11.46457038 podStartE2EDuration="11.46457038s" podCreationTimestamp="2025-11-25 09:56:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:56:12.434043282 +0000 UTC m=+1178.287036688" watchObservedRunningTime="2025-11-25 09:56:12.46457038 +0000 UTC m=+1178.317563756" Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.530639 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qmvj4"] Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.553903 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-qmvj4"] Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.593764 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-r8fm6"] Nov 25 09:56:12 crc kubenswrapper[4854]: I1125 09:56:12.607661 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-675f4bcbfc-r8fm6"] Nov 25 09:56:13 crc kubenswrapper[4854]: I1125 09:56:13.047257 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79" path="/var/lib/kubelet/pods/181f45e6-ad6a-45ea-ba5a-03ae9ed7fd79/volumes" Nov 25 09:56:13 crc kubenswrapper[4854]: I1125 09:56:13.047948 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e8f46ac-8fb7-4161-85bb-25af767745b0" path="/var/lib/kubelet/pods/1e8f46ac-8fb7-4161-85bb-25af767745b0/volumes" Nov 25 09:56:13 crc kubenswrapper[4854]: I1125 09:56:13.105558 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:56:13 crc kubenswrapper[4854]: I1125 09:56:13.447714 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" event={"ID":"2cd76364-963f-4af7-83ef-cc73ab247e14","Type":"ContainerStarted","Data":"cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666"} Nov 25 09:56:13 crc kubenswrapper[4854]: I1125 09:56:13.447767 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" Nov 25 09:56:13 crc kubenswrapper[4854]: I1125 09:56:13.469172 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" podStartSLOduration=6.460436592 podStartE2EDuration="20.469153429s" podCreationTimestamp="2025-11-25 09:55:53 +0000 UTC" firstStartedPulling="2025-11-25 09:55:57.636184077 +0000 UTC m=+1163.489177453" lastFinishedPulling="2025-11-25 09:56:11.644900924 +0000 UTC m=+1177.497894290" observedRunningTime="2025-11-25 09:56:13.468035788 +0000 UTC m=+1179.321029174" watchObservedRunningTime="2025-11-25 09:56:13.469153429 +0000 UTC m=+1179.322146805" Nov 25 09:56:13 crc kubenswrapper[4854]: I1125 09:56:13.915659 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:56:14 crc kubenswrapper[4854]: I1125 09:56:14.040055 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2t2j4"] Nov 25 09:56:16 crc kubenswrapper[4854]: E1125 09:56:16.216954 4854 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 25 09:56:16 crc kubenswrapper[4854]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/967a931c-cbe4-4085-b38b-60399485791d/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 25 09:56:16 crc kubenswrapper[4854]: > podSandboxID="57183b0c96a04758765db6db4b395493831df681b9b46d3bb244e6360dd00646" Nov 25 09:56:16 crc kubenswrapper[4854]: E1125 09:56:16.217453 4854 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 25 09:56:16 crc kubenswrapper[4854]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-skpr5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-l6qff_openstack(967a931c-cbe4-4085-b38b-60399485791d): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/967a931c-cbe4-4085-b38b-60399485791d/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 25 09:56:16 crc kubenswrapper[4854]: > logger="UnhandledError" Nov 25 09:56:16 crc kubenswrapper[4854]: E1125 09:56:16.218515 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/967a931c-cbe4-4085-b38b-60399485791d/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" podUID="967a931c-cbe4-4085-b38b-60399485791d" Nov 25 09:56:16 crc kubenswrapper[4854]: I1125 09:56:16.478953 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2t2j4" event={"ID":"f1553410-0e8c-4a68-89ed-67f3eeef891d","Type":"ContainerStarted","Data":"76b502c93854ee706d03327e40253015e396f2dba0c5ad7e21eaebf89dd07cb2"} Nov 25 09:56:16 crc kubenswrapper[4854]: I1125 09:56:16.480904 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"e868f46b-cfbf-4642-ad85-c32884fca542","Type":"ContainerStarted","Data":"eb0f12eec79ac43109d7e6781548908a29725c826089e4f7e3cef6a25ccf9619"} Nov 25 09:56:16 crc kubenswrapper[4854]: I1125 09:56:16.485065 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1ce61789-25f3-44d3-813d-d51d10d068f1","Type":"ContainerStarted","Data":"8f939ede5d9af1a547817d09ec462fe2eff37dd7018d41d1380561097705ad75"} Nov 25 09:56:19 crc kubenswrapper[4854]: I1125 09:56:19.136955 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" Nov 25 09:56:19 crc kubenswrapper[4854]: I1125 09:56:19.238749 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l6qff"] Nov 25 09:56:21 crc kubenswrapper[4854]: I1125 09:56:21.631465 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:21 crc kubenswrapper[4854]: I1125 09:56:21.631764 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:21 crc kubenswrapper[4854]: I1125 09:56:21.637350 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:22 crc kubenswrapper[4854]: I1125 09:56:22.538915 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7c6c5466d9-g4cjl" Nov 25 09:56:22 crc kubenswrapper[4854]: I1125 09:56:22.608303 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-765ccc5d45-ngmp2"] Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.025036 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.026431 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fx929,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(575fe5df-5a76-4633-9688-3997a708f3f4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.027784 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.033206 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.033334 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:observability-ui-dashboards,Image:registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb,Command:[],Args:[-port=9443 -cert=/var/serving-cert/tls.crt 
-key=/var/serving-cert/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:web,HostPort:0,ContainerPort:9443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:serving-cert,ReadOnly:true,MountPath:/var/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jp57z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod observability-ui-dashboards-7d5fb4cbfb-c6npv_openshift-operators(a5aeba85-24cb-48ef-9050-93a40b4d67f0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.036169 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"observability-ui-dashboards\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" podUID="a5aeba85-24cb-48ef-9050-93a40b4d67f0" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.557013 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.557610 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"observability-ui-dashboards\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb\\\"\"" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" podUID="a5aeba85-24cb-48ef-9050-93a40b4d67f0" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.628944 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.629231 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init-config-reloader,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62,Command:[/bin/prometheus-config-reloader],Args:[--watch-interval=0 --listen-address=:8081 --config-file=/etc/prometheus/config/prometheus.yaml.gz --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:reloader-init,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:SHARD,Value:0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/prometheus/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-out,ReadOnly:false,MountPath:/etc/prometheus/config_out,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-0,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-flk2x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(93c535b4-23bb-4c71-8ddc-1304ca205e55): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:56:24 crc kubenswrapper[4854]: E1125 09:56:24.630700 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/prometheus-metric-storage-0" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.028488 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.028542 4854 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.028586 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.029422 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8c88dce0ea083d0b4318356bc4c4cafd9ff804af077bca2201c157b710b82d4d"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.029492 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://8c88dce0ea083d0b4318356bc4c4cafd9ff804af077bca2201c157b710b82d4d" gracePeriod=600 Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.576572 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="8c88dce0ea083d0b4318356bc4c4cafd9ff804af077bca2201c157b710b82d4d" exitCode=0 Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.576656 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"8c88dce0ea083d0b4318356bc4c4cafd9ff804af077bca2201c157b710b82d4d"} Nov 25 09:56:25 crc kubenswrapper[4854]: I1125 09:56:25.576744 4854 scope.go:117] "RemoveContainer" containerID="5e14519da63f04cdbdb7f55713d2722df29a5332d12866e5327a4659d36c5bcf" Nov 25 09:56:25 crc kubenswrapper[4854]: E1125 09:56:25.594162 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" Nov 25 09:56:29 crc kubenswrapper[4854]: I1125 09:56:29.625530 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"65f6bcfa40b1e5bbb70c379bd608e17d8c0ff4d22430507df2078040825b6744"} Nov 25 09:56:29 crc kubenswrapper[4854]: I1125 09:56:29.627420 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0984ac84-1833-4ddb-b21b-d526b64e9991","Type":"ContainerStarted","Data":"0e51572786654d0a72f5e1601f6384927f460b9894e7f2319bfd4b4ad67bc5ed"} Nov 25 09:56:30 crc kubenswrapper[4854]: I1125 09:56:30.641080 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"827c4948-d1bc-4c63-838b-57f267bdcf93","Type":"ContainerStarted","Data":"1bb38ecc34458dd13dbd461802f0766ecedaa3b93e9f967d74fdd7c4807e6577"} Nov 25 09:56:31 crc kubenswrapper[4854]: I1125 09:56:31.651035 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6894f0be-f53f-401b-8707-4cc0cfd020dc","Type":"ContainerStarted","Data":"d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec"} Nov 25 09:56:31 crc kubenswrapper[4854]: I1125 09:56:31.654122 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"dc586641-37b8-4b9b-8479-a3c552bec71d","Type":"ContainerStarted","Data":"1d56aa5fdd0201276ecfee6387fdafa67cd3f4cd93571921d3582c84f66d1f16"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.668164 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e868f46b-cfbf-4642-ad85-c32884fca542","Type":"ContainerStarted","Data":"678aa5064c0561cbdddb0a4ac53857e76328e7c2df324c8bb0b4ac16345ad179"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.673155 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2f639da7-7576-4274-94c5-4304b6af9b4d","Type":"ContainerStarted","Data":"d9f73e30ad51153033a58ebd98fc6d90df6bbdbb32ac0863b7a3d3e312d63d41"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.673231 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.674573 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26" event={"ID":"04573f28-a6e2-46ca-8a02-a2265c5d68e9","Type":"ContainerStarted","Data":"c8d5d9b5e74842977631a3ecff083ffdba4e605548d3f3709434ca27ae68587a"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.674705 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-7dl26" Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.675802 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1ce61789-25f3-44d3-813d-d51d10d068f1","Type":"ContainerStarted","Data":"0fded07ab5a18a757e473e11d6b6e172023578e021df64bc72deba392c8b8cf7"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.677456 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2t2j4" event={"ID":"f1553410-0e8c-4a68-89ed-67f3eeef891d","Type":"ContainerStarted","Data":"fc8d27016aff639a174e36626972300968ea146bcf19bc4be998bdf9a17df37c"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.679520 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" event={"ID":"967a931c-cbe4-4085-b38b-60399485791d","Type":"ContainerStarted","Data":"dab93c541d07e9db5e519b4a7847185e348f04d7812adc0288dedc1c982fc1d0"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.679539 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" podUID="967a931c-cbe4-4085-b38b-60399485791d" containerName="dnsmasq-dns" containerID="cri-o://dab93c541d07e9db5e519b4a7847185e348f04d7812adc0288dedc1c982fc1d0" gracePeriod=10 Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.679597 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.681916 4854 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a52007f7-ea68-422e-90d3-be11aa0184d5","Type":"ContainerStarted","Data":"abf7216f7246fc294d8ac4f76239a8fcc5bab668725dbe1e584de57ec01e0f2c"} Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.681954 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.698115 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=19.119109974 podStartE2EDuration="35.698100092s" podCreationTimestamp="2025-11-25 09:55:57 +0000 UTC" firstStartedPulling="2025-11-25 09:56:11.423906681 +0000 UTC m=+1177.276900057" lastFinishedPulling="2025-11-25 09:56:28.002896799 +0000 UTC m=+1193.855890175" observedRunningTime="2025-11-25 09:56:32.689838515 +0000 UTC m=+1198.542831891" watchObservedRunningTime="2025-11-25 09:56:32.698100092 +0000 UTC m=+1198.551093468" Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.718432 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" podStartSLOduration=22.505456999 podStartE2EDuration="39.718412428s" podCreationTimestamp="2025-11-25 09:55:53 +0000 UTC" firstStartedPulling="2025-11-25 09:55:54.289500998 +0000 UTC m=+1160.142494374" lastFinishedPulling="2025-11-25 09:56:11.502456427 +0000 UTC m=+1177.355449803" observedRunningTime="2025-11-25 09:56:32.71482743 +0000 UTC m=+1198.567820806" watchObservedRunningTime="2025-11-25 09:56:32.718412428 +0000 UTC m=+1198.571405804" Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.765542 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=13.262852799000001 podStartE2EDuration="32.765518981s" podCreationTimestamp="2025-11-25 09:56:00 +0000 UTC" firstStartedPulling="2025-11-25 09:56:11.975364199 +0000 UTC m=+1177.828357575" lastFinishedPulling="2025-11-25 09:56:31.478030351 +0000 UTC m=+1197.331023757" observedRunningTime="2025-11-25 09:56:32.759802514 +0000 UTC m=+1198.612795900" watchObservedRunningTime="2025-11-25 09:56:32.765518981 +0000 UTC m=+1198.618512357" Nov 25 09:56:32 crc kubenswrapper[4854]: I1125 09:56:32.786294 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-7dl26" podStartSLOduration=14.000954543 podStartE2EDuration="30.78627992s" podCreationTimestamp="2025-11-25 09:56:02 +0000 UTC" firstStartedPulling="2025-11-25 09:56:11.970773234 +0000 UTC m=+1177.823766610" lastFinishedPulling="2025-11-25 09:56:28.756098611 +0000 UTC m=+1194.609091987" observedRunningTime="2025-11-25 09:56:32.78335074 +0000 UTC m=+1198.636344146" watchObservedRunningTime="2025-11-25 09:56:32.78627992 +0000 UTC m=+1198.639273296" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.696316 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"c24229dd-3c9c-47b6-8080-a1d51e0e6868","Type":"ContainerStarted","Data":"18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e"} Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.701286 4854 generic.go:334] "Generic (PLEG): container finished" podID="f1553410-0e8c-4a68-89ed-67f3eeef891d" containerID="fc8d27016aff639a174e36626972300968ea146bcf19bc4be998bdf9a17df37c" exitCode=0 Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.701346 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-ovs-2t2j4" event={"ID":"f1553410-0e8c-4a68-89ed-67f3eeef891d","Type":"ContainerDied","Data":"fc8d27016aff639a174e36626972300968ea146bcf19bc4be998bdf9a17df37c"} Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.707355 4854 generic.go:334] "Generic (PLEG): container finished" podID="967a931c-cbe4-4085-b38b-60399485791d" containerID="dab93c541d07e9db5e519b4a7847185e348f04d7812adc0288dedc1c982fc1d0" exitCode=0 Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.707438 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" event={"ID":"967a931c-cbe4-4085-b38b-60399485791d","Type":"ContainerDied","Data":"dab93c541d07e9db5e519b4a7847185e348f04d7812adc0288dedc1c982fc1d0"} Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.707602 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" event={"ID":"967a931c-cbe4-4085-b38b-60399485791d","Type":"ContainerDied","Data":"57183b0c96a04758765db6db4b395493831df681b9b46d3bb244e6360dd00646"} Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.707617 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57183b0c96a04758765db6db4b395493831df681b9b46d3bb244e6360dd00646" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.752334 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.804906 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skpr5\" (UniqueName: \"kubernetes.io/projected/967a931c-cbe4-4085-b38b-60399485791d-kube-api-access-skpr5\") pod \"967a931c-cbe4-4085-b38b-60399485791d\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.805164 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-config\") pod \"967a931c-cbe4-4085-b38b-60399485791d\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.805225 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-dns-svc\") pod \"967a931c-cbe4-4085-b38b-60399485791d\" (UID: \"967a931c-cbe4-4085-b38b-60399485791d\") " Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.816866 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/967a931c-cbe4-4085-b38b-60399485791d-kube-api-access-skpr5" (OuterVolumeSpecName: "kube-api-access-skpr5") pod "967a931c-cbe4-4085-b38b-60399485791d" (UID: "967a931c-cbe4-4085-b38b-60399485791d"). InnerVolumeSpecName "kube-api-access-skpr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.853437 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "967a931c-cbe4-4085-b38b-60399485791d" (UID: "967a931c-cbe4-4085-b38b-60399485791d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.877616 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-config" (OuterVolumeSpecName: "config") pod "967a931c-cbe4-4085-b38b-60399485791d" (UID: "967a931c-cbe4-4085-b38b-60399485791d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.908650 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skpr5\" (UniqueName: \"kubernetes.io/projected/967a931c-cbe4-4085-b38b-60399485791d-kube-api-access-skpr5\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.908708 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:33 crc kubenswrapper[4854]: I1125 09:56:33.908718 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/967a931c-cbe4-4085-b38b-60399485791d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.721409 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2t2j4" event={"ID":"f1553410-0e8c-4a68-89ed-67f3eeef891d","Type":"ContainerStarted","Data":"5fd5431b86cdc5b927dd306727fe05328a027ef95a08bcb54cf0ec3fcaaf75f3"} Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.721505 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l6qff" Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.722465 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.722497 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.722509 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2t2j4" event={"ID":"f1553410-0e8c-4a68-89ed-67f3eeef891d","Type":"ContainerStarted","Data":"c4024c50039087c171e9edc13bd5bbd3ba2b19bc61f21c68885a14747af60954"} Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.746998 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-2t2j4" podStartSLOduration=20.914430299 podStartE2EDuration="32.746974047s" podCreationTimestamp="2025-11-25 09:56:02 +0000 UTC" firstStartedPulling="2025-11-25 09:56:16.248287228 +0000 UTC m=+1182.101280604" lastFinishedPulling="2025-11-25 09:56:28.080830976 +0000 UTC m=+1193.933824352" observedRunningTime="2025-11-25 09:56:34.744095938 +0000 UTC m=+1200.597089324" watchObservedRunningTime="2025-11-25 09:56:34.746974047 +0000 UTC m=+1200.599967433" Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.769234 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l6qff"] Nov 25 09:56:34 crc kubenswrapper[4854]: I1125 09:56:34.778755 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l6qff"] Nov 25 09:56:35 crc kubenswrapper[4854]: I1125 09:56:35.026196 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="967a931c-cbe4-4085-b38b-60399485791d" 
path="/var/lib/kubelet/pods/967a931c-cbe4-4085-b38b-60399485791d/volumes" Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.746581 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1ce61789-25f3-44d3-813d-d51d10d068f1","Type":"ContainerStarted","Data":"4dd08e1825d41a39f338a6719514482012b3b3a2600c04f889e8b57d47defe05"} Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.749005 4854 generic.go:334] "Generic (PLEG): container finished" podID="827c4948-d1bc-4c63-838b-57f267bdcf93" containerID="1bb38ecc34458dd13dbd461802f0766ecedaa3b93e9f967d74fdd7c4807e6577" exitCode=0 Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.749073 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"827c4948-d1bc-4c63-838b-57f267bdcf93","Type":"ContainerDied","Data":"1bb38ecc34458dd13dbd461802f0766ecedaa3b93e9f967d74fdd7c4807e6577"} Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.754654 4854 generic.go:334] "Generic (PLEG): container finished" podID="0984ac84-1833-4ddb-b21b-d526b64e9991" containerID="0e51572786654d0a72f5e1601f6384927f460b9894e7f2319bfd4b4ad67bc5ed" exitCode=0 Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.754722 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0984ac84-1833-4ddb-b21b-d526b64e9991","Type":"ContainerDied","Data":"0e51572786654d0a72f5e1601f6384927f460b9894e7f2319bfd4b4ad67bc5ed"} Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.763651 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e868f46b-cfbf-4642-ad85-c32884fca542","Type":"ContainerStarted","Data":"4eac512309bbdc5e48f114d300233ff8a51cf8dbfd6bab8b8f9197d1139eb66e"} Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.766610 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=10.867409771 podStartE2EDuration="31.766597524s" podCreationTimestamp="2025-11-25 09:56:06 +0000 UTC" firstStartedPulling="2025-11-25 09:56:16.217016129 +0000 UTC m=+1182.070009505" lastFinishedPulling="2025-11-25 09:56:37.116203882 +0000 UTC m=+1202.969197258" observedRunningTime="2025-11-25 09:56:37.765559366 +0000 UTC m=+1203.618552762" watchObservedRunningTime="2025-11-25 09:56:37.766597524 +0000 UTC m=+1203.619590900" Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.849885 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=13.96102392 podStartE2EDuration="34.849857198s" podCreationTimestamp="2025-11-25 09:56:03 +0000 UTC" firstStartedPulling="2025-11-25 09:56:16.217064801 +0000 UTC m=+1182.070058177" lastFinishedPulling="2025-11-25 09:56:37.105898079 +0000 UTC m=+1202.958891455" observedRunningTime="2025-11-25 09:56:37.832747239 +0000 UTC m=+1203.685740635" watchObservedRunningTime="2025-11-25 09:56:37.849857198 +0000 UTC m=+1203.702850564" Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.851485 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.851693 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:37 crc kubenswrapper[4854]: I1125 09:56:37.899271 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 
09:56:38 crc kubenswrapper[4854]: I1125 09:56:38.259814 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 09:56:38 crc kubenswrapper[4854]: I1125 09:56:38.776546 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"827c4948-d1bc-4c63-838b-57f267bdcf93","Type":"ContainerStarted","Data":"9a9eaa4b6370181c1da58366e8ec6aadc5ac193a895d4747a9a9d75d089e645c"} Nov 25 09:56:38 crc kubenswrapper[4854]: I1125 09:56:38.779478 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"0984ac84-1833-4ddb-b21b-d526b64e9991","Type":"ContainerStarted","Data":"f40a6ef016ff0dcc20f69e807de81677419d25c7fb8b69f0530e4ebb8aa1f9fd"} Nov 25 09:56:38 crc kubenswrapper[4854]: I1125 09:56:38.808643 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=27.128219164 podStartE2EDuration="42.80862237s" podCreationTimestamp="2025-11-25 09:55:56 +0000 UTC" firstStartedPulling="2025-11-25 09:56:11.505898081 +0000 UTC m=+1177.358891457" lastFinishedPulling="2025-11-25 09:56:27.186301287 +0000 UTC m=+1193.039294663" observedRunningTime="2025-11-25 09:56:38.801105113 +0000 UTC m=+1204.654098509" watchObservedRunningTime="2025-11-25 09:56:38.80862237 +0000 UTC m=+1204.661615746" Nov 25 09:56:38 crc kubenswrapper[4854]: I1125 09:56:38.831177 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=22.806275747 podStartE2EDuration="43.831159278s" podCreationTimestamp="2025-11-25 09:55:55 +0000 UTC" firstStartedPulling="2025-11-25 09:56:04.565926318 +0000 UTC m=+1170.418919694" lastFinishedPulling="2025-11-25 09:56:25.590809849 +0000 UTC m=+1191.443803225" observedRunningTime="2025-11-25 09:56:38.829939665 +0000 UTC m=+1204.682933041" watchObservedRunningTime="2025-11-25 09:56:38.831159278 +0000 UTC m=+1204.684152654" Nov 25 09:56:38 crc kubenswrapper[4854]: I1125 09:56:38.842970 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.227449 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-87xrn"] Nov 25 09:56:39 crc kubenswrapper[4854]: E1125 09:56:39.228006 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967a931c-cbe4-4085-b38b-60399485791d" containerName="init" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.228031 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="967a931c-cbe4-4085-b38b-60399485791d" containerName="init" Nov 25 09:56:39 crc kubenswrapper[4854]: E1125 09:56:39.228087 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967a931c-cbe4-4085-b38b-60399485791d" containerName="dnsmasq-dns" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.228098 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="967a931c-cbe4-4085-b38b-60399485791d" containerName="dnsmasq-dns" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.228323 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="967a931c-cbe4-4085-b38b-60399485791d" containerName="dnsmasq-dns" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.229704 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.234893 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.271014 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-87xrn"] Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.326055 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzgg8\" (UniqueName: \"kubernetes.io/projected/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-kube-api-access-fzgg8\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.326118 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-config\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.326235 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.326386 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.357401 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-ff4zf"] Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.358723 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.361201 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.380748 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-ff4zf"] Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432168 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432227 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-ovs-rundir\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432324 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb7wm\" (UniqueName: \"kubernetes.io/projected/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-kube-api-access-fb7wm\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432389 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432457 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432499 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-config\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432538 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-ovn-rundir\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432583 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzgg8\" (UniqueName: \"kubernetes.io/projected/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-kube-api-access-fzgg8\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: 
\"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432615 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-config\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.432699 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-combined-ca-bundle\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.433733 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.434349 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.434977 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-config\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.444782 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.485501 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzgg8\" (UniqueName: \"kubernetes.io/projected/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-kube-api-access-fzgg8\") pod \"dnsmasq-dns-6bc7876d45-87xrn\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.534471 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb7wm\" (UniqueName: \"kubernetes.io/projected/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-kube-api-access-fb7wm\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.534540 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.534585 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-config\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.534612 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-ovn-rundir\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.534671 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-combined-ca-bundle\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.534738 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-ovs-rundir\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.535015 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-ovs-rundir\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.535421 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-ovn-rundir\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.535587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-config\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.558740 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.559188 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.576158 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-combined-ca-bundle\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.587250 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb7wm\" (UniqueName: \"kubernetes.io/projected/39cb343e-3bd0-4a63-b3dc-6814e8e8bca1-kube-api-access-fb7wm\") pod \"ovn-controller-metrics-ff4zf\" (UID: \"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1\") " pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.672084 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-87xrn"] Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.694963 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-ff4zf" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.734269 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-2hn7c"] Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.736065 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.748059 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.759112 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2hn7c"] Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.843803 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.843866 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-config\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.844005 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-dns-svc\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.844058 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.844108 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbl9h\" (UniqueName: \"kubernetes.io/projected/4fff0203-70fe-435a-b845-cc6e4c321b60-kube-api-access-sbl9h\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.946491 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.946877 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-config\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.947046 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-dns-svc\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.947458 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.947960 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-config\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.948152 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-dns-svc\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.948290 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.949005 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.949229 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbl9h\" 
(UniqueName: \"kubernetes.io/projected/4fff0203-70fe-435a-b845-cc6e4c321b60-kube-api-access-sbl9h\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:39 crc kubenswrapper[4854]: I1125 09:56:39.973641 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbl9h\" (UniqueName: \"kubernetes.io/projected/4fff0203-70fe-435a-b845-cc6e4c321b60-kube-api-access-sbl9h\") pod \"dnsmasq-dns-8554648995-2hn7c\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.095551 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.225133 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-87xrn"] Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.335701 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-ff4zf"] Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.443845 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.449058 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.610304 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2hn7c"] Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.665304 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-qf5fg"] Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.667447 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.671905 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.717837 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-qf5fg"] Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.791968 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2hn7c"] Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.819720 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-config\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.820033 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.820112 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c6nh\" (UniqueName: \"kubernetes.io/projected/8661757b-9fae-4808-9568-3997a0b0c7b6-kube-api-access-9c6nh\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.820146 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.820170 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.841829 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-ff4zf" event={"ID":"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1","Type":"ContainerStarted","Data":"b7012b3c44ca53f4d5a34b686316982436f2bd057ee6b805361f8a57b6ea79d0"} Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.844573 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" event={"ID":"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc","Type":"ContainerStarted","Data":"0c63b9c263cb9da3c29542ce42e128c5caefe5400d5466dd8992a271d1769146"} Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.846599 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"575fe5df-5a76-4633-9688-3997a708f3f4","Type":"ContainerStarted","Data":"a3871b7ff59f7e655f575c3f64e51e999b3cdec859edb86a4be2741b96d9f09f"} Nov 25 09:56:40 crc 
kubenswrapper[4854]: W1125 09:56:40.914510 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fff0203_70fe_435a_b845_cc6e4c321b60.slice/crio-6133b006361b7edf4a77cc0cb8865f785a380e21aadf11374f66700dba2ea3b3 WatchSource:0}: Error finding container 6133b006361b7edf4a77cc0cb8865f785a380e21aadf11374f66700dba2ea3b3: Status 404 returned error can't find the container with id 6133b006361b7edf4a77cc0cb8865f785a380e21aadf11374f66700dba2ea3b3 Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.922298 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.922412 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-config\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.922507 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.922659 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c6nh\" (UniqueName: \"kubernetes.io/projected/8661757b-9fae-4808-9568-3997a0b0c7b6-kube-api-access-9c6nh\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.922715 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.923502 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.924814 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.926556 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-config\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 
09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.927401 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.938091 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.964835 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c6nh\" (UniqueName: \"kubernetes.io/projected/8661757b-9fae-4808-9568-3997a0b0c7b6-kube-api-access-9c6nh\") pod \"dnsmasq-dns-b8fbc5445-qf5fg\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:40 crc kubenswrapper[4854]: I1125 09:56:40.990497 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:41 crc kubenswrapper[4854]: I1125 09:56:41.137339 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:56:41 crc kubenswrapper[4854]: I1125 09:56:41.139309 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:56:41 crc kubenswrapper[4854]: I1125 09:56:41.146602 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-nx5rr" Nov 25 09:56:41 crc kubenswrapper[4854]: I1125 09:56:41.146750 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 09:56:41 crc kubenswrapper[4854]: I1125 09:56:41.152255 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 09:56:41 crc kubenswrapper[4854]: I1125 09:56:41.152480 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 09:56:41 crc kubenswrapper[4854]: I1125 09:56:41.164988 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.229505 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c660812-03bd-4475-895a-d896c14ef125-config\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.229780 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6c660812-03bd-4475-895a-d896c14ef125-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.229992 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c660812-03bd-4475-895a-d896c14ef125-scripts\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.230048 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m4tc\" (UniqueName: 
\"kubernetes.io/projected/6c660812-03bd-4475-895a-d896c14ef125-kube-api-access-5m4tc\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.230107 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.230131 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.230152 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.332394 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.332435 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.333486 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.333541 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c660812-03bd-4475-895a-d896c14ef125-config\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.333589 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6c660812-03bd-4475-895a-d896c14ef125-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.333755 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c660812-03bd-4475-895a-d896c14ef125-scripts\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.333794 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m4tc\" (UniqueName: \"kubernetes.io/projected/6c660812-03bd-4475-895a-d896c14ef125-kube-api-access-5m4tc\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.335895 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6c660812-03bd-4475-895a-d896c14ef125-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.336467 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c660812-03bd-4475-895a-d896c14ef125-scripts\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.336587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c660812-03bd-4475-895a-d896c14ef125-config\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.340618 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.341140 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.342950 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c660812-03bd-4475-895a-d896c14ef125-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.359153 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m4tc\" (UniqueName: \"kubernetes.io/projected/6c660812-03bd-4475-895a-d896c14ef125-kube-api-access-5m4tc\") pod \"ovn-northd-0\" (UID: \"6c660812-03bd-4475-895a-d896c14ef125\") " pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.640629 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.765660 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.791798 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.794950 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.795385 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.795463 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.795553 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-fzhpc" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.797604 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.848826 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/eb6d8324-0633-4891-9a9c-f782e7cec247-cache\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.849139 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/eb6d8324-0633-4891-9a9c-f782e7cec247-lock\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.849178 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.849209 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg7pf\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-kube-api-access-tg7pf\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.849270 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.881291 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2hn7c" event={"ID":"4fff0203-70fe-435a-b845-cc6e4c321b60","Type":"ContainerStarted","Data":"d60054ae0a608f5619d6864b90e60d90e9cd82a82b9e48343572db9561a935ba"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.881337 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2hn7c" event={"ID":"4fff0203-70fe-435a-b845-cc6e4c321b60","Type":"ContainerStarted","Data":"6133b006361b7edf4a77cc0cb8865f785a380e21aadf11374f66700dba2ea3b3"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.881483 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-2hn7c" 
podUID="4fff0203-70fe-435a-b845-cc6e4c321b60" containerName="init" containerID="cri-o://d60054ae0a608f5619d6864b90e60d90e9cd82a82b9e48343572db9561a935ba" gracePeriod=10 Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.890041 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-c6npv" event={"ID":"a5aeba85-24cb-48ef-9050-93a40b4d67f0","Type":"ContainerStarted","Data":"ce11dff253a673059a5c270319723570a1e7fc17dbd7320d49cf146ef1b2eaf1"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.892909 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-ff4zf" event={"ID":"39cb343e-3bd0-4a63-b3dc-6814e8e8bca1","Type":"ContainerStarted","Data":"faff8834abb54f87f80e9a4c71922f52fff117ce27275a4d97b78b00007e7ef3"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.896261 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerStarted","Data":"a6703b655a254b75add489c6b2c87a50e14d61760651e6996b6ba25d68203c5a"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.899139 4854 generic.go:334] "Generic (PLEG): container finished" podID="107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc" containerID="eeaa41c10387e71ecc4f1bac734971b0e2018195e623859551d759b21914c68b" exitCode=0 Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.899188 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" event={"ID":"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc","Type":"ContainerDied","Data":"eeaa41c10387e71ecc4f1bac734971b0e2018195e623859551d759b21914c68b"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.952021 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/eb6d8324-0633-4891-9a9c-f782e7cec247-cache\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.952129 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/eb6d8324-0633-4891-9a9c-f782e7cec247-lock\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.952187 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.952248 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg7pf\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-kube-api-access-tg7pf\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.952375 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:41.952580 4854 projected.go:288] 
Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:41.952595 4854 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:41.952637 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift podName:eb6d8324-0633-4891-9a9c-f782e7cec247 nodeName:}" failed. No retries permitted until 2025-11-25 09:56:42.452620828 +0000 UTC m=+1208.305614204 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift") pod "swift-storage-0" (UID: "eb6d8324-0633-4891-9a9c-f782e7cec247") : configmap "swift-ring-files" not found
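
[annotation] The three errors above are the first round of a retry loop: the swift-storage-0 pod's projected volume "etc-swift" references configmap "swift-ring-files", which does not exist yet (presumably it is produced by the swift-ring-rebalance-m8t44 job scheduled just below), so MountVolume.SetUp fails and the kubelet schedules a retry. The durationBeforeRetry values that follow in this log double on each failure: 500ms here, then 1s (09:56:42), 2s (09:56:43), 4s (09:56:45), and 8s (09:56:49). A minimal sketch of that schedule, assuming only what the log shows (initial 500ms delay, factor 2; the kubelet caps this growth eventually, but no cap is visible in this excerpt):

```go
package main

import (
	"fmt"
	"time"
)

// Reproduce the durationBeforeRetry sequence seen above for the
// etc-swift volume: 500ms, 1s, 2s, 4s, 8s.
func main() {
	delay := 500 * time.Millisecond // first backoff after the initial failure
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Printf("attempt %d: no retries permitted for %v\n", attempt, delay)
		delay *= 2 // each subsequent MountVolume.SetUp failure doubles the wait
	}
}
```

Until the configmap appears, every retry fails with the same "configmap \"swift-ring-files\" not found" error; only the backoff interval changes.
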
pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:41.998719 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.353488 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-m8t44"] Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.357455 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.368584 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-m8t44"] Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.396339 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.396460 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.396603 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.462438 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.462495 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-swiftconf\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.462559 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-dispersionconf\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.462591 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-combined-ca-bundle\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:42.462686 4854 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:42.462717 4854 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:42.462772 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift podName:eb6d8324-0633-4891-9a9c-f782e7cec247 
nodeName:}" failed. No retries permitted until 2025-11-25 09:56:43.462753832 +0000 UTC m=+1209.315747198 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift") pod "swift-storage-0" (UID: "eb6d8324-0633-4891-9a9c-f782e7cec247") : configmap "swift-ring-files" not found Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.462806 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-scripts\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.462968 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2wrc\" (UniqueName: \"kubernetes.io/projected/5a1d05ce-0c76-4823-a86f-004ea7655be9-kube-api-access-r2wrc\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.463011 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-ring-data-devices\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.463040 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5a1d05ce-0c76-4823-a86f-004ea7655be9-etc-swift\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.564760 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-dispersionconf\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.564852 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-combined-ca-bundle\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.564890 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-scripts\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.564945 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2wrc\" (UniqueName: \"kubernetes.io/projected/5a1d05ce-0c76-4823-a86f-004ea7655be9-kube-api-access-r2wrc\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 
crc kubenswrapper[4854]: I1125 09:56:42.564967 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-ring-data-devices\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.564993 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5a1d05ce-0c76-4823-a86f-004ea7655be9-etc-swift\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.565204 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-swiftconf\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.565574 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5a1d05ce-0c76-4823-a86f-004ea7655be9-etc-swift\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.565634 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-scripts\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.565743 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-ring-data-devices\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.568068 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-swiftconf\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.568279 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-combined-ca-bundle\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.568343 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-dispersionconf\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.593316 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2wrc\" (UniqueName: 
\"kubernetes.io/projected/5a1d05ce-0c76-4823-a86f-004ea7655be9-kube-api-access-r2wrc\") pod \"swift-ring-rebalance-m8t44\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.720728 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.913984 4854 generic.go:334] "Generic (PLEG): container finished" podID="4fff0203-70fe-435a-b845-cc6e4c321b60" containerID="d60054ae0a608f5619d6864b90e60d90e9cd82a82b9e48343572db9561a935ba" exitCode=0 Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:42.914322 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2hn7c" event={"ID":"4fff0203-70fe-435a-b845-cc6e4c321b60","Type":"ContainerDied","Data":"d60054ae0a608f5619d6864b90e60d90e9cd82a82b9e48343572db9561a935ba"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.487111 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:43.487282 4854 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:43.487296 4854 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:56:43 crc kubenswrapper[4854]: E1125 09:56:43.487338 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift podName:eb6d8324-0633-4891-9a9c-f782e7cec247 nodeName:}" failed. No retries permitted until 2025-11-25 09:56:45.487323419 +0000 UTC m=+1211.340316785 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift") pod "swift-storage-0" (UID: "eb6d8324-0633-4891-9a9c-f782e7cec247") : configmap "swift-ring-files" not found Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.776965 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.821863 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-qf5fg"] Nov 25 09:56:43 crc kubenswrapper[4854]: W1125 09:56:43.821920 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8661757b_9fae_4808_9568_3997a0b0c7b6.slice/crio-0f5484dfda6089d7307a6ecdb0443326211d7ec97d86d18c56062bddcd888de1 WatchSource:0}: Error finding container 0f5484dfda6089d7307a6ecdb0443326211d7ec97d86d18c56062bddcd888de1: Status 404 returned error can't find the container with id 0f5484dfda6089d7307a6ecdb0443326211d7ec97d86d18c56062bddcd888de1 Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.905287 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-config\") pod \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.905407 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzgg8\" (UniqueName: \"kubernetes.io/projected/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-kube-api-access-fzgg8\") pod \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.905526 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-dns-svc\") pod \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.905613 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-ovsdbserver-sb\") pod \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\" (UID: \"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc\") " Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.911845 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-kube-api-access-fzgg8" (OuterVolumeSpecName: "kube-api-access-fzgg8") pod "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc" (UID: "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc"). InnerVolumeSpecName "kube-api-access-fzgg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.942466 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc" (UID: "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.942907 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-config" (OuterVolumeSpecName: "config") pod "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc" (UID: "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.945098 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.955011 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" event={"ID":"107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc","Type":"ContainerDied","Data":"0c63b9c263cb9da3c29542ce42e128c5caefe5400d5466dd8992a271d1769146"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.955084 4854 scope.go:117] "RemoveContainer" containerID="eeaa41c10387e71ecc4f1bac734971b0e2018195e623859551d759b21914c68b" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.955222 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-87xrn" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.955804 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.962084 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2hn7c" event={"ID":"4fff0203-70fe-435a-b845-cc6e4c321b60","Type":"ContainerDied","Data":"6133b006361b7edf4a77cc0cb8865f785a380e21aadf11374f66700dba2ea3b3"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.962136 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2hn7c" Nov 25 09:56:43 crc kubenswrapper[4854]: W1125 09:56:43.963449 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c660812_03bd_4475_895a_d896c14ef125.slice/crio-c848b2424954e24e50b7ed3b6f3c1ae5a4271290b6f82cd146692339294c0e9a WatchSource:0}: Error finding container c848b2424954e24e50b7ed3b6f3c1ae5a4271290b6f82cd146692339294c0e9a: Status 404 returned error can't find the container with id c848b2424954e24e50b7ed3b6f3c1ae5a4271290b6f82cd146692339294c0e9a Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.971279 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" event={"ID":"8661757b-9fae-4808-9568-3997a0b0c7b6","Type":"ContainerStarted","Data":"0f5484dfda6089d7307a6ecdb0443326211d7ec97d86d18c56062bddcd888de1"} Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.975912 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc" (UID: "107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.983367 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-m8t44"] Nov 25 09:56:43 crc kubenswrapper[4854]: I1125 09:56:43.990835 4854 scope.go:117] "RemoveContainer" containerID="d60054ae0a608f5619d6864b90e60d90e9cd82a82b9e48343572db9561a935ba" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.010664 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzgg8\" (UniqueName: \"kubernetes.io/projected/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-kube-api-access-fzgg8\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.010720 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.010734 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.010745 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.112043 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbl9h\" (UniqueName: \"kubernetes.io/projected/4fff0203-70fe-435a-b845-cc6e4c321b60-kube-api-access-sbl9h\") pod \"4fff0203-70fe-435a-b845-cc6e4c321b60\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.112124 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-sb\") pod \"4fff0203-70fe-435a-b845-cc6e4c321b60\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.112248 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-dns-svc\") pod \"4fff0203-70fe-435a-b845-cc6e4c321b60\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.112362 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-nb\") pod \"4fff0203-70fe-435a-b845-cc6e4c321b60\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.112380 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-config\") pod \"4fff0203-70fe-435a-b845-cc6e4c321b60\" (UID: \"4fff0203-70fe-435a-b845-cc6e4c321b60\") " Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.116231 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fff0203-70fe-435a-b845-cc6e4c321b60-kube-api-access-sbl9h" (OuterVolumeSpecName: "kube-api-access-sbl9h") pod "4fff0203-70fe-435a-b845-cc6e4c321b60" (UID: 
"4fff0203-70fe-435a-b845-cc6e4c321b60"). InnerVolumeSpecName "kube-api-access-sbl9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.135855 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4fff0203-70fe-435a-b845-cc6e4c321b60" (UID: "4fff0203-70fe-435a-b845-cc6e4c321b60"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.136530 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-config" (OuterVolumeSpecName: "config") pod "4fff0203-70fe-435a-b845-cc6e4c321b60" (UID: "4fff0203-70fe-435a-b845-cc6e4c321b60"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.137309 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4fff0203-70fe-435a-b845-cc6e4c321b60" (UID: "4fff0203-70fe-435a-b845-cc6e4c321b60"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.139093 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4fff0203-70fe-435a-b845-cc6e4c321b60" (UID: "4fff0203-70fe-435a-b845-cc6e4c321b60"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.214713 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbl9h\" (UniqueName: \"kubernetes.io/projected/4fff0203-70fe-435a-b845-cc6e4c321b60-kube-api-access-sbl9h\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.214738 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.214749 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.214756 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.214764 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fff0203-70fe-435a-b845-cc6e4c321b60-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.319138 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-87xrn"] Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.323664 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-87xrn"] Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.374553 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2hn7c"] Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.388826 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2hn7c"] Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.986509 4854 generic.go:334] "Generic (PLEG): container finished" podID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerID="043a0130e4f0235379b8dcddc49849f293b1e7e83623af4ffe6f52ed90563889" exitCode=0 Nov 25 09:56:44 crc kubenswrapper[4854]: I1125 09:56:44.986617 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" event={"ID":"8661757b-9fae-4808-9568-3997a0b0c7b6","Type":"ContainerDied","Data":"043a0130e4f0235379b8dcddc49849f293b1e7e83623af4ffe6f52ed90563889"} Nov 25 09:56:45 crc kubenswrapper[4854]: I1125 09:56:45.006080 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-m8t44" event={"ID":"5a1d05ce-0c76-4823-a86f-004ea7655be9","Type":"ContainerStarted","Data":"b758832bfc0fb713e514e67547b7b3fa0d9401465d5f9be008227942ec57aac2"} Nov 25 09:56:45 crc kubenswrapper[4854]: I1125 09:56:45.008265 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6c660812-03bd-4475-895a-d896c14ef125","Type":"ContainerStarted","Data":"c848b2424954e24e50b7ed3b6f3c1ae5a4271290b6f82cd146692339294c0e9a"} Nov 25 09:56:45 crc kubenswrapper[4854]: I1125 09:56:45.072562 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc" path="/var/lib/kubelet/pods/107c6d3d-44ff-4d05-bea1-4d6cd7a3a8bc/volumes" Nov 25 09:56:45 crc kubenswrapper[4854]: I1125 09:56:45.077789 4854 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fff0203-70fe-435a-b845-cc6e4c321b60" path="/var/lib/kubelet/pods/4fff0203-70fe-435a-b845-cc6e4c321b60/volumes" Nov 25 09:56:45 crc kubenswrapper[4854]: E1125 09:56:45.458405 4854 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.184:53434->38.102.83.184:43333: write tcp 38.102.83.184:53434->38.102.83.184:43333: write: broken pipe Nov 25 09:56:45 crc kubenswrapper[4854]: I1125 09:56:45.548587 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:45 crc kubenswrapper[4854]: E1125 09:56:45.548881 4854 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:56:45 crc kubenswrapper[4854]: E1125 09:56:45.548913 4854 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:56:45 crc kubenswrapper[4854]: E1125 09:56:45.548987 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift podName:eb6d8324-0633-4891-9a9c-f782e7cec247 nodeName:}" failed. No retries permitted until 2025-11-25 09:56:49.548962675 +0000 UTC m=+1215.401956061 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift") pod "swift-storage-0" (UID: "eb6d8324-0633-4891-9a9c-f782e7cec247") : configmap "swift-ring-files" not found Nov 25 09:56:46 crc kubenswrapper[4854]: I1125 09:56:46.018002 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6c660812-03bd-4475-895a-d896c14ef125","Type":"ContainerStarted","Data":"db12ccf05e748cdaaa7cbb74d0f0763640f617f848ccd18b86139ca7b032b847"} Nov 25 09:56:46 crc kubenswrapper[4854]: I1125 09:56:46.018056 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6c660812-03bd-4475-895a-d896c14ef125","Type":"ContainerStarted","Data":"3ce9590dc7506f6a00e60a7585b7c3a09539a18407cbb726de19c1def6cc959e"} Nov 25 09:56:46 crc kubenswrapper[4854]: I1125 09:56:46.018145 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 09:56:46 crc kubenswrapper[4854]: I1125 09:56:46.019913 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" event={"ID":"8661757b-9fae-4808-9568-3997a0b0c7b6","Type":"ContainerStarted","Data":"69aac727da93d49e87569df43f08d80deb5ad646479eb358200a163bc2cd1b6b"} Nov 25 09:56:46 crc kubenswrapper[4854]: I1125 09:56:46.020141 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:56:46 crc kubenswrapper[4854]: I1125 09:56:46.040152 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.607690263 podStartE2EDuration="5.040135329s" podCreationTimestamp="2025-11-25 09:56:41 +0000 UTC" firstStartedPulling="2025-11-25 09:56:43.969778184 +0000 UTC m=+1209.822771560" lastFinishedPulling="2025-11-25 09:56:45.40222325 +0000 UTC m=+1211.255216626" observedRunningTime="2025-11-25 09:56:46.036076808 
+0000 UTC m=+1211.889070204" watchObservedRunningTime="2025-11-25 09:56:46.040135329 +0000 UTC m=+1211.893128705"
Nov 25 09:56:46 crc kubenswrapper[4854]: I1125 09:56:46.055336 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" podStartSLOduration=6.055319276 podStartE2EDuration="6.055319276s" podCreationTimestamp="2025-11-25 09:56:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:56:46.053614499 +0000 UTC m=+1211.906607885" watchObservedRunningTime="2025-11-25 09:56:46.055319276 +0000 UTC m=+1211.908312652"
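
[annotation] The "Observed pod startup duration" entries in this log are internally consistent with a simple split: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image pull window (lastFinishedPulling minus firstStartedPulling). When the pull timestamps are the zero time "0001-01-01", as for dnsmasq-dns-b8fbc5445-qf5fg above, nothing was pulled and the two durations match. A short check of that arithmetic against the ovn-northd-0 entry ending just above (a sketch of the accounting inferred from the log's own numbers, not kubelet source):

```go
package main

import (
	"fmt"
	"time"
)

// Recompute ovn-northd-0's startup figures from the logged timestamps:
//   podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp
//   podStartSLOduration = podStartE2EDuration - (lastFinishedPulling - firstStartedPulling)
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-25 09:56:41 +0000 UTC")
	firstPull := parse("2025-11-25 09:56:43.969778184 +0000 UTC")
	lastPull := parse("2025-11-25 09:56:45.40222325 +0000 UTC")
	running := parse("2025-11-25 09:56:46.040135329 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - lastPull.Sub(firstPull) // image pull time excluded from the SLO figure
	fmt.Println(e2e, slo)                // 5.040135329s 3.607690263s, matching the log
}
```

The same arithmetic reproduces the earlier observability-ui-dashboards entry: 41.965611294s end to end minus a 29.424041072s pull window gives the logged podStartSLOduration of 12.541570222s.
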
Need to start a new one" pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:47 crc kubenswrapper[4854]: I1125 09:56:47.934422 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 09:56:47 crc kubenswrapper[4854]: I1125 09:56:47.942179 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-x6mc4"] Nov 25 09:56:47 crc kubenswrapper[4854]: I1125 09:56:47.953009 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a056-account-create-gtc4f"] Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.005956 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gnqn\" (UniqueName: \"kubernetes.io/projected/76c016ea-a928-4d8a-9936-ca262a75afa4-kube-api-access-7gnqn\") pod \"keystone-a056-account-create-gtc4f\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") " pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.006041 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/423749f9-fd4e-4d6c-860e-e5d17269da04-operator-scripts\") pod \"keystone-db-create-x6mc4\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") " pod="openstack/keystone-db-create-x6mc4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.006103 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlbqr\" (UniqueName: \"kubernetes.io/projected/423749f9-fd4e-4d6c-860e-e5d17269da04-kube-api-access-xlbqr\") pod \"keystone-db-create-x6mc4\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") " pod="openstack/keystone-db-create-x6mc4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.006128 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c016ea-a928-4d8a-9936-ca262a75afa4-operator-scripts\") pod \"keystone-a056-account-create-gtc4f\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") " pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.037093 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.037145 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.044602 4854 generic.go:334] "Generic (PLEG): container finished" podID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerID="a6703b655a254b75add489c6b2c87a50e14d61760651e6996b6ba25d68203c5a" exitCode=0 Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.044695 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerDied","Data":"a6703b655a254b75add489c6b2c87a50e14d61760651e6996b6ba25d68203c5a"} Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.047062 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-765ccc5d45-ngmp2_8f308b0a-c5d0-446b-8489-7a8fcdaac38f/console/0.log" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.047101 4854 generic.go:334] "Generic (PLEG): container finished" podID="8f308b0a-c5d0-446b-8489-7a8fcdaac38f" 
containerID="3021beef4c4019245c3d2c23497aaf5ab80e22e1596f0096a38f3daae0567676" exitCode=2 Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.047435 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-765ccc5d45-ngmp2" event={"ID":"8f308b0a-c5d0-446b-8489-7a8fcdaac38f","Type":"ContainerDied","Data":"3021beef4c4019245c3d2c23497aaf5ab80e22e1596f0096a38f3daae0567676"} Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.108614 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gnqn\" (UniqueName: \"kubernetes.io/projected/76c016ea-a928-4d8a-9936-ca262a75afa4-kube-api-access-7gnqn\") pod \"keystone-a056-account-create-gtc4f\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") " pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.108744 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/423749f9-fd4e-4d6c-860e-e5d17269da04-operator-scripts\") pod \"keystone-db-create-x6mc4\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") " pod="openstack/keystone-db-create-x6mc4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.108946 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlbqr\" (UniqueName: \"kubernetes.io/projected/423749f9-fd4e-4d6c-860e-e5d17269da04-kube-api-access-xlbqr\") pod \"keystone-db-create-x6mc4\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") " pod="openstack/keystone-db-create-x6mc4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.109010 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c016ea-a928-4d8a-9936-ca262a75afa4-operator-scripts\") pod \"keystone-a056-account-create-gtc4f\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") " pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.110479 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/423749f9-fd4e-4d6c-860e-e5d17269da04-operator-scripts\") pod \"keystone-db-create-x6mc4\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") " pod="openstack/keystone-db-create-x6mc4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.111247 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c016ea-a928-4d8a-9936-ca262a75afa4-operator-scripts\") pod \"keystone-a056-account-create-gtc4f\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") " pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.130482 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-rkthx"] Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.135360 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-rkthx" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.143285 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gnqn\" (UniqueName: \"kubernetes.io/projected/76c016ea-a928-4d8a-9936-ca262a75afa4-kube-api-access-7gnqn\") pod \"keystone-a056-account-create-gtc4f\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") " pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.150734 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlbqr\" (UniqueName: \"kubernetes.io/projected/423749f9-fd4e-4d6c-860e-e5d17269da04-kube-api-access-xlbqr\") pod \"keystone-db-create-x6mc4\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") " pod="openstack/keystone-db-create-x6mc4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.153144 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-rkthx"] Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.185177 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-30f6-account-create-v94c4"] Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.187229 4854 patch_prober.go:28] interesting pod/console-765ccc5d45-ngmp2 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.91:8443/health\": dial tcp 10.217.0.91:8443: connect: connection refused" start-of-body= Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.188080 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-765ccc5d45-ngmp2" podUID="8f308b0a-c5d0-446b-8489-7a8fcdaac38f" containerName="console" probeResult="failure" output="Get \"https://10.217.0.91:8443/health\": dial tcp 10.217.0.91:8443: connect: connection refused" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.188654 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-30f6-account-create-v94c4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.191653 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.210653 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-operator-scripts\") pod \"placement-db-create-rkthx\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") " pod="openstack/placement-db-create-rkthx" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.210763 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5ngf\" (UniqueName: \"kubernetes.io/projected/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-kube-api-access-f5ngf\") pod \"placement-db-create-rkthx\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") " pod="openstack/placement-db-create-rkthx" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.212314 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.229770 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-30f6-account-create-v94c4"] Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.259440 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-x6mc4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.270013 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a056-account-create-gtc4f" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.312841 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-operator-scripts\") pod \"placement-db-create-rkthx\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") " pod="openstack/placement-db-create-rkthx" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.312903 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5ngf\" (UniqueName: \"kubernetes.io/projected/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-kube-api-access-f5ngf\") pod \"placement-db-create-rkthx\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") " pod="openstack/placement-db-create-rkthx" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.313077 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5xps\" (UniqueName: \"kubernetes.io/projected/513c3ff8-3514-4d35-bbec-52fa4e2bd363-kube-api-access-g5xps\") pod \"placement-30f6-account-create-v94c4\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") " pod="openstack/placement-30f6-account-create-v94c4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.313123 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/513c3ff8-3514-4d35-bbec-52fa4e2bd363-operator-scripts\") pod \"placement-30f6-account-create-v94c4\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") " pod="openstack/placement-30f6-account-create-v94c4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.314101 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-operator-scripts\") pod \"placement-db-create-rkthx\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") " pod="openstack/placement-db-create-rkthx" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.329119 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5ngf\" (UniqueName: \"kubernetes.io/projected/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-kube-api-access-f5ngf\") pod \"placement-db-create-rkthx\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") " pod="openstack/placement-db-create-rkthx" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.414483 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5xps\" (UniqueName: \"kubernetes.io/projected/513c3ff8-3514-4d35-bbec-52fa4e2bd363-kube-api-access-g5xps\") pod \"placement-30f6-account-create-v94c4\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") " pod="openstack/placement-30f6-account-create-v94c4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.414781 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/513c3ff8-3514-4d35-bbec-52fa4e2bd363-operator-scripts\") pod \"placement-30f6-account-create-v94c4\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") " pod="openstack/placement-30f6-account-create-v94c4" Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 
09:56:48.415612 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/513c3ff8-3514-4d35-bbec-52fa4e2bd363-operator-scripts\") pod \"placement-30f6-account-create-v94c4\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") " pod="openstack/placement-30f6-account-create-v94c4"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.434248 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5xps\" (UniqueName: \"kubernetes.io/projected/513c3ff8-3514-4d35-bbec-52fa4e2bd363-kube-api-access-g5xps\") pod \"placement-30f6-account-create-v94c4\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") " pod="openstack/placement-30f6-account-create-v94c4"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.435001 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-6zmvt"]
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.437289 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.449320 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-6zmvt"]
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.518428 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e21e324-1ba4-4b7b-b45c-197db1c1e890-operator-scripts\") pod \"glance-db-create-6zmvt\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") " pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.518576 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b59b\" (UniqueName: \"kubernetes.io/projected/6e21e324-1ba4-4b7b-b45c-197db1c1e890-kube-api-access-8b59b\") pod \"glance-db-create-6zmvt\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") " pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.532737 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-rkthx"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.551271 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-30f6-account-create-v94c4"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.594826 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-0eb6-account-create-8djfp"]
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.596342 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.601527 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.622339 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-operator-scripts\") pod \"glance-0eb6-account-create-8djfp\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") " pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.622596 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hqxw\" (UniqueName: \"kubernetes.io/projected/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-kube-api-access-6hqxw\") pod \"glance-0eb6-account-create-8djfp\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") " pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.622701 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e21e324-1ba4-4b7b-b45c-197db1c1e890-operator-scripts\") pod \"glance-db-create-6zmvt\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") " pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.622788 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b59b\" (UniqueName: \"kubernetes.io/projected/6e21e324-1ba4-4b7b-b45c-197db1c1e890-kube-api-access-8b59b\") pod \"glance-db-create-6zmvt\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") " pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.625018 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e21e324-1ba4-4b7b-b45c-197db1c1e890-operator-scripts\") pod \"glance-db-create-6zmvt\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") " pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.658875 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b59b\" (UniqueName: \"kubernetes.io/projected/6e21e324-1ba4-4b7b-b45c-197db1c1e890-kube-api-access-8b59b\") pod \"glance-db-create-6zmvt\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") " pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.658954 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0eb6-account-create-8djfp"]
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.729908 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-operator-scripts\") pod \"glance-0eb6-account-create-8djfp\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") " pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.730092 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hqxw\" (UniqueName: \"kubernetes.io/projected/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-kube-api-access-6hqxw\") pod \"glance-0eb6-account-create-8djfp\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") " pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.746414 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-operator-scripts\") pod \"glance-0eb6-account-create-8djfp\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") " pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.760322 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hqxw\" (UniqueName: \"kubernetes.io/projected/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-kube-api-access-6hqxw\") pod \"glance-0eb6-account-create-8djfp\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") " pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.835931 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:48 crc kubenswrapper[4854]: I1125 09:56:48.953777 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:49 crc kubenswrapper[4854]: I1125 09:56:49.233774 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 25 09:56:49 crc kubenswrapper[4854]: I1125 09:56:49.552852 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0"
Nov 25 09:56:49 crc kubenswrapper[4854]: E1125 09:56:49.553254 4854 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 25 09:56:49 crc kubenswrapper[4854]: E1125 09:56:49.553288 4854 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 25 09:56:49 crc kubenswrapper[4854]: E1125 09:56:49.553365 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift podName:eb6d8324-0633-4891-9a9c-f782e7cec247 nodeName:}" failed. No retries permitted until 2025-11-25 09:56:57.553340026 +0000 UTC m=+1223.406333402 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift") pod "swift-storage-0" (UID: "eb6d8324-0633-4891-9a9c-f782e7cec247") : configmap "swift-ring-files" not found
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.163620 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-q8skm"]
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.165096 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.186793 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-q8skm"]
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.275323 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn7td\" (UniqueName: \"kubernetes.io/projected/6efa34a3-9747-4cb3-b829-f4d95b402668-kube-api-access-rn7td\") pod \"mysqld-exporter-openstack-db-create-q8skm\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") " pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.275689 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6efa34a3-9747-4cb3-b829-f4d95b402668-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-q8skm\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") " pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.377748 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn7td\" (UniqueName: \"kubernetes.io/projected/6efa34a3-9747-4cb3-b829-f4d95b402668-kube-api-access-rn7td\") pod \"mysqld-exporter-openstack-db-create-q8skm\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") " pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.377919 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6efa34a3-9747-4cb3-b829-f4d95b402668-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-q8skm\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") " pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.380362 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-76e1-account-create-dv8dm"]
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.382090 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.383637 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6efa34a3-9747-4cb3-b829-f4d95b402668-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-q8skm\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") " pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.390030 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.391266 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-76e1-account-create-dv8dm"]
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.411938 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn7td\" (UniqueName: \"kubernetes.io/projected/6efa34a3-9747-4cb3-b829-f4d95b402668-kube-api-access-rn7td\") pod \"mysqld-exporter-openstack-db-create-q8skm\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") " pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.479822 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp7fp\" (UniqueName: \"kubernetes.io/projected/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-kube-api-access-wp7fp\") pod \"mysqld-exporter-76e1-account-create-dv8dm\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") " pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.479982 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-operator-scripts\") pod \"mysqld-exporter-76e1-account-create-dv8dm\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") " pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.506573 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.582153 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp7fp\" (UniqueName: \"kubernetes.io/projected/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-kube-api-access-wp7fp\") pod \"mysqld-exporter-76e1-account-create-dv8dm\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") " pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.582278 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-operator-scripts\") pod \"mysqld-exporter-76e1-account-create-dv8dm\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") " pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.583151 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-operator-scripts\") pod \"mysqld-exporter-76e1-account-create-dv8dm\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") " pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.610550 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp7fp\" (UniqueName: \"kubernetes.io/projected/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-kube-api-access-wp7fp\") pod \"mysqld-exporter-76e1-account-create-dv8dm\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") " pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.611423 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.698722 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-765ccc5d45-ngmp2_8f308b0a-c5d0-446b-8489-7a8fcdaac38f/console/0.log"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.698788 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-765ccc5d45-ngmp2"
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.787345 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-config\") pod \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") "
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.787776 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-trusted-ca-bundle\") pod \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") "
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.787811 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-oauth-config\") pod \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") "
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.787847 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-service-ca\") pod \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") "
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.787869 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtgff\" (UniqueName: \"kubernetes.io/projected/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-kube-api-access-mtgff\") pod \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") "
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.787894 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-serving-cert\") pod \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") "
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.788014 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-oauth-serving-cert\") pod \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\" (UID: \"8f308b0a-c5d0-446b-8489-7a8fcdaac38f\") "
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.789115 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-config" (OuterVolumeSpecName: "console-config") pod "8f308b0a-c5d0-446b-8489-7a8fcdaac38f" (UID: "8f308b0a-c5d0-446b-8489-7a8fcdaac38f"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.790059 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "8f308b0a-c5d0-446b-8489-7a8fcdaac38f" (UID: "8f308b0a-c5d0-446b-8489-7a8fcdaac38f"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.794522 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "8f308b0a-c5d0-446b-8489-7a8fcdaac38f" (UID: "8f308b0a-c5d0-446b-8489-7a8fcdaac38f"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.796332 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-service-ca" (OuterVolumeSpecName: "service-ca") pod "8f308b0a-c5d0-446b-8489-7a8fcdaac38f" (UID: "8f308b0a-c5d0-446b-8489-7a8fcdaac38f"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.806245 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "8f308b0a-c5d0-446b-8489-7a8fcdaac38f" (UID: "8f308b0a-c5d0-446b-8489-7a8fcdaac38f"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.807740 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "8f308b0a-c5d0-446b-8489-7a8fcdaac38f" (UID: "8f308b0a-c5d0-446b-8489-7a8fcdaac38f"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.808047 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-kube-api-access-mtgff" (OuterVolumeSpecName: "kube-api-access-mtgff") pod "8f308b0a-c5d0-446b-8489-7a8fcdaac38f" (UID: "8f308b0a-c5d0-446b-8489-7a8fcdaac38f"). InnerVolumeSpecName "kube-api-access-mtgff". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.892176 4854 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.892215 4854 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.892224 4854 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.892232 4854 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.892242 4854 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.892253 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtgff\" (UniqueName: \"kubernetes.io/projected/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-kube-api-access-mtgff\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.892262 4854 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f308b0a-c5d0-446b-8489-7a8fcdaac38f-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:50 crc kubenswrapper[4854]: I1125 09:56:50.993830 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg"
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.050459 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-twfb7"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.050707 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" podUID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerName="dnsmasq-dns" containerID="cri-o://cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666" gracePeriod=10
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.086792 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-765ccc5d45-ngmp2_8f308b0a-c5d0-446b-8489-7a8fcdaac38f/console/0.log"
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.087073 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-765ccc5d45-ngmp2" event={"ID":"8f308b0a-c5d0-446b-8489-7a8fcdaac38f","Type":"ContainerDied","Data":"8aad63e6476398bc868af80c7b2f0eff7e3d3ad4e66f216053f192f4d9e6ad22"}
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.087112 4854 scope.go:117] "RemoveContainer" containerID="3021beef4c4019245c3d2c23497aaf5ab80e22e1596f0096a38f3daae0567676"
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.087254 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-765ccc5d45-ngmp2"
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.107956 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-m8t44" event={"ID":"5a1d05ce-0c76-4823-a86f-004ea7655be9","Type":"ContainerStarted","Data":"4909e284025c019814d0dc52446700a46770988d57beca1d9f26b8979dae0854"}
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.131520 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-m8t44" podStartSLOduration=2.636157454 podStartE2EDuration="9.13150079s" podCreationTimestamp="2025-11-25 09:56:42 +0000 UTC" firstStartedPulling="2025-11-25 09:56:43.961446385 +0000 UTC m=+1209.814439761" lastFinishedPulling="2025-11-25 09:56:50.456789721 +0000 UTC m=+1216.309783097" observedRunningTime="2025-11-25 09:56:51.127999123 +0000 UTC m=+1216.980992509" watchObservedRunningTime="2025-11-25 09:56:51.13150079 +0000 UTC m=+1216.984494166"
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.200479 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-765ccc5d45-ngmp2"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.220888 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-765ccc5d45-ngmp2"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.359750 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-rkthx"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.395055 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a056-account-create-gtc4f"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.665212 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-x6mc4"]
Nov 25 09:56:51 crc kubenswrapper[4854]: W1125 09:56:51.671516 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2af4c5bf_31f7_4a80_8ceb_244ede8ba2b7.slice/crio-8ebf1f448b41a9a88d4de39e3c435f9083ffc021e39d381bd0414c49deccd122 WatchSource:0}: Error finding container 8ebf1f448b41a9a88d4de39e3c435f9083ffc021e39d381bd0414c49deccd122: Status 404 returned error can't find the container with id 8ebf1f448b41a9a88d4de39e3c435f9083ffc021e39d381bd0414c49deccd122
Nov 25 09:56:51 crc kubenswrapper[4854]: W1125 09:56:51.674776 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod423749f9_fd4e_4d6c_860e_e5d17269da04.slice/crio-e7c89ac8ecfcd2adf186a7c8c215c11b4d9da716ceb78378f053ae3858eb0e06 WatchSource:0}: Error finding container e7c89ac8ecfcd2adf186a7c8c215c11b4d9da716ceb78378f053ae3858eb0e06: Status 404 returned error can't find the container with id e7c89ac8ecfcd2adf186a7c8c215c11b4d9da716ceb78378f053ae3858eb0e06
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.675896 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0eb6-account-create-8djfp"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.686039 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-30f6-account-create-v94c4"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.948753 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-q8skm"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.975309 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-76e1-account-create-dv8dm"]
Nov 25 09:56:51 crc kubenswrapper[4854]: I1125 09:56:51.984461 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-6zmvt"]
Nov 25 09:56:51 crc kubenswrapper[4854]: W1125 09:56:51.993061 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6efa34a3_9747_4cb3_b829_f4d95b402668.slice/crio-b0edd73793a21c2de59215329a18a8163a4875a8fef931c175caf56509286c3e WatchSource:0}: Error finding container b0edd73793a21c2de59215329a18a8163a4875a8fef931c175caf56509286c3e: Status 404 returned error can't find the container with id b0edd73793a21c2de59215329a18a8163a4875a8fef931c175caf56509286c3e
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.145748 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-76e1-account-create-dv8dm" event={"ID":"c5c9071a-fe25-4ca2-bee6-38fea725d4d4","Type":"ContainerStarted","Data":"aa01ec6102e9ba4eeb7b19d82658c2b66ed4700333007ce696fad61792b66682"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.151202 4854 generic.go:334] "Generic (PLEG): container finished" podID="9dfd5d17-f81b-40bd-82a8-e52a2c1d8429" containerID="6a5eac02fed633d574667927717afeda3d68015787b61fa294d5e9ca90f59202" exitCode=0
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.151331 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-rkthx" event={"ID":"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429","Type":"ContainerDied","Data":"6a5eac02fed633d574667927717afeda3d68015787b61fa294d5e9ca90f59202"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.151367 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-rkthx" event={"ID":"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429","Type":"ContainerStarted","Data":"f44c01932193569fb380b9d696601532c9de1b4814bd493bed94bc1b80ab368d"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.155403 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-q8skm" event={"ID":"6efa34a3-9747-4cb3-b829-f4d95b402668","Type":"ContainerStarted","Data":"b0edd73793a21c2de59215329a18a8163a4875a8fef931c175caf56509286c3e"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.161548 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.162137 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-30f6-account-create-v94c4" event={"ID":"513c3ff8-3514-4d35-bbec-52fa4e2bd363","Type":"ContainerStarted","Data":"baff4ed1020036b4da8754ee2e1ab5e73579e8d40fcdca9d941d9806c183fefb"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.162169 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-30f6-account-create-v94c4" event={"ID":"513c3ff8-3514-4d35-bbec-52fa4e2bd363","Type":"ContainerStarted","Data":"1ef42659bafd3f2d7ddd3b91950f00059e42490bb2c34e1aeb433ca88522d046"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.169399 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6zmvt" event={"ID":"6e21e324-1ba4-4b7b-b45c-197db1c1e890","Type":"ContainerStarted","Data":"e53a6c83affa25c02beb6ec3024597aafbc7c408ba2c821073015e24ede3b9b4"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.174617 4854 generic.go:334] "Generic (PLEG): container finished" podID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerID="cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666" exitCode=0
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.174789 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.175079 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" event={"ID":"2cd76364-963f-4af7-83ef-cc73ab247e14","Type":"ContainerDied","Data":"cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.175121 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-twfb7" event={"ID":"2cd76364-963f-4af7-83ef-cc73ab247e14","Type":"ContainerDied","Data":"59fb80ff18aeaee7868e2bdce5b76aac5ad3e32c43e51fcb566af29d3cec3e32"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.175201 4854 scope.go:117] "RemoveContainer" containerID="cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.183586 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0eb6-account-create-8djfp" event={"ID":"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7","Type":"ContainerStarted","Data":"e95af3f44186f5bed8bf345c0f41ab3788ca2cc184c1c349f1f45e4f9523409d"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.183656 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0eb6-account-create-8djfp" event={"ID":"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7","Type":"ContainerStarted","Data":"8ebf1f448b41a9a88d4de39e3c435f9083ffc021e39d381bd0414c49deccd122"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.192951 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-x6mc4" event={"ID":"423749f9-fd4e-4d6c-860e-e5d17269da04","Type":"ContainerStarted","Data":"97d599ea366a8550312d92c5eea199fdc8b2b4d99d9cbfe4b539e537f84f65f3"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.193943 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-x6mc4" event={"ID":"423749f9-fd4e-4d6c-860e-e5d17269da04","Type":"ContainerStarted","Data":"e7c89ac8ecfcd2adf186a7c8c215c11b4d9da716ceb78378f053ae3858eb0e06"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.205897 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-30f6-account-create-v94c4" podStartSLOduration=4.205876762 podStartE2EDuration="4.205876762s" podCreationTimestamp="2025-11-25 09:56:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:56:52.190418119 +0000 UTC m=+1218.043411515" watchObservedRunningTime="2025-11-25 09:56:52.205876762 +0000 UTC m=+1218.058870138"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.214925 4854 generic.go:334] "Generic (PLEG): container finished" podID="76c016ea-a928-4d8a-9936-ca262a75afa4" containerID="34a4f1590eff5ac4b1144b819215d001a64f93e44eed16b531bb27693637c721" exitCode=0
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.215164 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a056-account-create-gtc4f" event={"ID":"76c016ea-a928-4d8a-9936-ca262a75afa4","Type":"ContainerDied","Data":"34a4f1590eff5ac4b1144b819215d001a64f93e44eed16b531bb27693637c721"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.215201 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a056-account-create-gtc4f" event={"ID":"76c016ea-a928-4d8a-9936-ca262a75afa4","Type":"ContainerStarted","Data":"d95dd1ca4d2d3cf406e7ff64978ecfeaaa33722659ae3afa07628d21bfdb920e"}
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.244443 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5v5r\" (UniqueName: \"kubernetes.io/projected/2cd76364-963f-4af7-83ef-cc73ab247e14-kube-api-access-f5v5r\") pod \"2cd76364-963f-4af7-83ef-cc73ab247e14\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") "
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.244513 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-dns-svc\") pod \"2cd76364-963f-4af7-83ef-cc73ab247e14\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") "
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.244827 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-config\") pod \"2cd76364-963f-4af7-83ef-cc73ab247e14\" (UID: \"2cd76364-963f-4af7-83ef-cc73ab247e14\") "
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.246336 4854 scope.go:117] "RemoveContainer" containerID="f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.257113 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-x6mc4" podStartSLOduration=5.257084568 podStartE2EDuration="5.257084568s" podCreationTimestamp="2025-11-25 09:56:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:56:52.246782415 +0000 UTC m=+1218.099775801" watchObservedRunningTime="2025-11-25 09:56:52.257084568 +0000 UTC m=+1218.110077944"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.262984 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-0eb6-account-create-8djfp" podStartSLOduration=4.262960729 podStartE2EDuration="4.262960729s" podCreationTimestamp="2025-11-25 09:56:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:56:52.261345884 +0000 UTC m=+1218.114339260" watchObservedRunningTime="2025-11-25 09:56:52.262960729 +0000 UTC m=+1218.115954105"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.268659 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cd76364-963f-4af7-83ef-cc73ab247e14-kube-api-access-f5v5r" (OuterVolumeSpecName: "kube-api-access-f5v5r") pod "2cd76364-963f-4af7-83ef-cc73ab247e14" (UID: "2cd76364-963f-4af7-83ef-cc73ab247e14"). InnerVolumeSpecName "kube-api-access-f5v5r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.348150 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5v5r\" (UniqueName: \"kubernetes.io/projected/2cd76364-963f-4af7-83ef-cc73ab247e14-kube-api-access-f5v5r\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.403039 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-config" (OuterVolumeSpecName: "config") pod "2cd76364-963f-4af7-83ef-cc73ab247e14" (UID: "2cd76364-963f-4af7-83ef-cc73ab247e14"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.412957 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2cd76364-963f-4af7-83ef-cc73ab247e14" (UID: "2cd76364-963f-4af7-83ef-cc73ab247e14"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.454347 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.454389 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cd76364-963f-4af7-83ef-cc73ab247e14-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.735157 4854 scope.go:117] "RemoveContainer" containerID="cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666"
Nov 25 09:56:52 crc kubenswrapper[4854]: E1125 09:56:52.735515 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666\": container with ID starting with cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666 not found: ID does not exist" containerID="cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.735546 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666"} err="failed to get container status \"cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666\": rpc error: code = NotFound desc = could not find container \"cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666\": container with ID starting with cc7713cc234cec681d1047ff973362fca609d892367bc105deee7ceb1a3a3666 not found: ID does not exist"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.735565 4854 scope.go:117] "RemoveContainer" containerID="f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575"
Nov 25 09:56:52 crc kubenswrapper[4854]: E1125 09:56:52.735728 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575\": container with ID starting with f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575 not found: ID does not exist" containerID="f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.735751 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575"} err="failed to get container status \"f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575\": rpc error: code = NotFound desc = could not find container \"f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575\": container with ID starting with f863907b32ffb9e7f37284c188c57e7a922df4e7093e7d6ecc4e4896fa5d8575 not found: ID does not exist"
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.805294 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-twfb7"]
Nov 25 09:56:52 crc kubenswrapper[4854]: I1125 09:56:52.813407 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-twfb7"]
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.024699 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cd76364-963f-4af7-83ef-cc73ab247e14" path="/var/lib/kubelet/pods/2cd76364-963f-4af7-83ef-cc73ab247e14/volumes"
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.025635 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f308b0a-c5d0-446b-8489-7a8fcdaac38f" path="/var/lib/kubelet/pods/8f308b0a-c5d0-446b-8489-7a8fcdaac38f/volumes"
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.227031 4854 generic.go:334] "Generic (PLEG): container finished" podID="c5c9071a-fe25-4ca2-bee6-38fea725d4d4" containerID="50fa2b39aafe9ed2f2f7e4c5f45556b0fa77418cdb7b4a598605b75e614df720" exitCode=0
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.227761 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-76e1-account-create-dv8dm" event={"ID":"c5c9071a-fe25-4ca2-bee6-38fea725d4d4","Type":"ContainerDied","Data":"50fa2b39aafe9ed2f2f7e4c5f45556b0fa77418cdb7b4a598605b75e614df720"}
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.229606 4854 generic.go:334] "Generic (PLEG): container finished" podID="6e21e324-1ba4-4b7b-b45c-197db1c1e890" containerID="8da2179eb6b2be245fef4ebcc09c6df1ec6b558a1898860c982fcbef041f73cd" exitCode=0
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.229700 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6zmvt" event={"ID":"6e21e324-1ba4-4b7b-b45c-197db1c1e890","Type":"ContainerDied","Data":"8da2179eb6b2be245fef4ebcc09c6df1ec6b558a1898860c982fcbef041f73cd"}
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.232719 4854 generic.go:334] "Generic (PLEG): container finished" podID="2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7" containerID="e95af3f44186f5bed8bf345c0f41ab3788ca2cc184c1c349f1f45e4f9523409d" exitCode=0
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.232768 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0eb6-account-create-8djfp" event={"ID":"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7","Type":"ContainerDied","Data":"e95af3f44186f5bed8bf345c0f41ab3788ca2cc184c1c349f1f45e4f9523409d"}
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.233983 4854 generic.go:334] "Generic (PLEG): container finished" podID="6efa34a3-9747-4cb3-b829-f4d95b402668" containerID="6bc97d0196bd758efdf5f832f16030608ad6c721d833a8496314b304a93ee311" exitCode=0
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.234020 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-q8skm" event={"ID":"6efa34a3-9747-4cb3-b829-f4d95b402668","Type":"ContainerDied","Data":"6bc97d0196bd758efdf5f832f16030608ad6c721d833a8496314b304a93ee311"}
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.235420 4854 generic.go:334] "Generic (PLEG): container finished" podID="423749f9-fd4e-4d6c-860e-e5d17269da04" containerID="97d599ea366a8550312d92c5eea199fdc8b2b4d99d9cbfe4b539e537f84f65f3" exitCode=0
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.235503 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-x6mc4" event={"ID":"423749f9-fd4e-4d6c-860e-e5d17269da04","Type":"ContainerDied","Data":"97d599ea366a8550312d92c5eea199fdc8b2b4d99d9cbfe4b539e537f84f65f3"}
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.237686 4854 generic.go:334] "Generic (PLEG): container finished" podID="513c3ff8-3514-4d35-bbec-52fa4e2bd363" containerID="baff4ed1020036b4da8754ee2e1ab5e73579e8d40fcdca9d941d9806c183fefb" exitCode=0
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.237726 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-30f6-account-create-v94c4" event={"ID":"513c3ff8-3514-4d35-bbec-52fa4e2bd363","Type":"ContainerDied","Data":"baff4ed1020036b4da8754ee2e1ab5e73579e8d40fcdca9d941d9806c183fefb"}
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.725748 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-rkthx"
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.824587 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a056-account-create-gtc4f"
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.897703 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-operator-scripts\") pod \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") "
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.899996 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5ngf\" (UniqueName: \"kubernetes.io/projected/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-kube-api-access-f5ngf\") pod \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\" (UID: \"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429\") "
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.904240 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9dfd5d17-f81b-40bd-82a8-e52a2c1d8429" (UID: "9dfd5d17-f81b-40bd-82a8-e52a2c1d8429"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:53 crc kubenswrapper[4854]: I1125 09:56:53.905633 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-kube-api-access-f5ngf" (OuterVolumeSpecName: "kube-api-access-f5ngf") pod "9dfd5d17-f81b-40bd-82a8-e52a2c1d8429" (UID: "9dfd5d17-f81b-40bd-82a8-e52a2c1d8429"). InnerVolumeSpecName "kube-api-access-f5ngf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.002615 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c016ea-a928-4d8a-9936-ca262a75afa4-operator-scripts\") pod \"76c016ea-a928-4d8a-9936-ca262a75afa4\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") "
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.002895 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gnqn\" (UniqueName: \"kubernetes.io/projected/76c016ea-a928-4d8a-9936-ca262a75afa4-kube-api-access-7gnqn\") pod \"76c016ea-a928-4d8a-9936-ca262a75afa4\" (UID: \"76c016ea-a928-4d8a-9936-ca262a75afa4\") "
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.003258 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76c016ea-a928-4d8a-9936-ca262a75afa4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "76c016ea-a928-4d8a-9936-ca262a75afa4" (UID: "76c016ea-a928-4d8a-9936-ca262a75afa4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.003844 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.003870 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5ngf\" (UniqueName: \"kubernetes.io/projected/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429-kube-api-access-f5ngf\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.003885 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c016ea-a928-4d8a-9936-ca262a75afa4-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.007394 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76c016ea-a928-4d8a-9936-ca262a75afa4-kube-api-access-7gnqn" (OuterVolumeSpecName: "kube-api-access-7gnqn") pod "76c016ea-a928-4d8a-9936-ca262a75afa4" (UID: "76c016ea-a928-4d8a-9936-ca262a75afa4"). InnerVolumeSpecName "kube-api-access-7gnqn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.106450 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gnqn\" (UniqueName: \"kubernetes.io/projected/76c016ea-a928-4d8a-9936-ca262a75afa4-kube-api-access-7gnqn\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.249221 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-rkthx" event={"ID":"9dfd5d17-f81b-40bd-82a8-e52a2c1d8429","Type":"ContainerDied","Data":"f44c01932193569fb380b9d696601532c9de1b4814bd493bed94bc1b80ab368d"}
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.249264 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f44c01932193569fb380b9d696601532c9de1b4814bd493bed94bc1b80ab368d"
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.249330 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-rkthx"
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.254646 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a056-account-create-gtc4f"
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.255228 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a056-account-create-gtc4f" event={"ID":"76c016ea-a928-4d8a-9936-ca262a75afa4","Type":"ContainerDied","Data":"d95dd1ca4d2d3cf406e7ff64978ecfeaaa33722659ae3afa07628d21bfdb920e"}
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.255372 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d95dd1ca4d2d3cf406e7ff64978ecfeaaa33722659ae3afa07628d21bfdb920e"
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.770244 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.933424 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hqxw\" (UniqueName: \"kubernetes.io/projected/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-kube-api-access-6hqxw\") pod \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") "
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.933828 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-operator-scripts\") pod \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\" (UID: \"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7\") "
Nov 25 09:56:54 crc kubenswrapper[4854]: I1125 09:56:54.948225 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7" (UID: "2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.005934 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-kube-api-access-6hqxw" (OuterVolumeSpecName: "kube-api-access-6hqxw") pod "2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7" (UID: "2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7"). InnerVolumeSpecName "kube-api-access-6hqxw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.036135 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.036420 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hqxw\" (UniqueName: \"kubernetes.io/projected/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7-kube-api-access-6hqxw\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.250703 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-x6mc4"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.257934 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-30f6-account-create-v94c4"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.276053 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-x6mc4" event={"ID":"423749f9-fd4e-4d6c-860e-e5d17269da04","Type":"ContainerDied","Data":"e7c89ac8ecfcd2adf186a7c8c215c11b4d9da716ceb78378f053ae3858eb0e06"}
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.276173 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7c89ac8ecfcd2adf186a7c8c215c11b4d9da716ceb78378f053ae3858eb0e06"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.276115 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-x6mc4"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.286386 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.287082 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-30f6-account-create-v94c4"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.287143 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-30f6-account-create-v94c4" event={"ID":"513c3ff8-3514-4d35-bbec-52fa4e2bd363","Type":"ContainerDied","Data":"1ef42659bafd3f2d7ddd3b91950f00059e42490bb2c34e1aeb433ca88522d046"}
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.287178 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ef42659bafd3f2d7ddd3b91950f00059e42490bb2c34e1aeb433ca88522d046"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.288000 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-76e1-account-create-dv8dm"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.295442 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-76e1-account-create-dv8dm" event={"ID":"c5c9071a-fe25-4ca2-bee6-38fea725d4d4","Type":"ContainerDied","Data":"aa01ec6102e9ba4eeb7b19d82658c2b66ed4700333007ce696fad61792b66682"}
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.295481 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa01ec6102e9ba4eeb7b19d82658c2b66ed4700333007ce696fad61792b66682"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.303418 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6zmvt"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.303640 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.303789 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6zmvt" event={"ID":"6e21e324-1ba4-4b7b-b45c-197db1c1e890","Type":"ContainerDied","Data":"e53a6c83affa25c02beb6ec3024597aafbc7c408ba2c821073015e24ede3b9b4"}
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.303819 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e53a6c83affa25c02beb6ec3024597aafbc7c408ba2c821073015e24ede3b9b4"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.305079 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0eb6-account-create-8djfp" event={"ID":"2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7","Type":"ContainerDied","Data":"8ebf1f448b41a9a88d4de39e3c435f9083ffc021e39d381bd0414c49deccd122"}
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.305106 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ebf1f448b41a9a88d4de39e3c435f9083ffc021e39d381bd0414c49deccd122"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.305219 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0eb6-account-create-8djfp"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.315657 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-q8skm" event={"ID":"6efa34a3-9747-4cb3-b829-f4d95b402668","Type":"ContainerDied","Data":"b0edd73793a21c2de59215329a18a8163a4875a8fef931c175caf56509286c3e"}
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.315705 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0edd73793a21c2de59215329a18a8163a4875a8fef931c175caf56509286c3e"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.315755 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-q8skm"
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342019 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e21e324-1ba4-4b7b-b45c-197db1c1e890-operator-scripts\") pod \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342088 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5xps\" (UniqueName: \"kubernetes.io/projected/513c3ff8-3514-4d35-bbec-52fa4e2bd363-kube-api-access-g5xps\") pod \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342122 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlbqr\" (UniqueName: \"kubernetes.io/projected/423749f9-fd4e-4d6c-860e-e5d17269da04-kube-api-access-xlbqr\") pod \"423749f9-fd4e-4d6c-860e-e5d17269da04\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342161 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp7fp\" (UniqueName: \"kubernetes.io/projected/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-kube-api-access-wp7fp\") pod \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342235 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/423749f9-fd4e-4d6c-860e-e5d17269da04-operator-scripts\") pod \"423749f9-fd4e-4d6c-860e-e5d17269da04\" (UID: \"423749f9-fd4e-4d6c-860e-e5d17269da04\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342299 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b59b\" (UniqueName: \"kubernetes.io/projected/6e21e324-1ba4-4b7b-b45c-197db1c1e890-kube-api-access-8b59b\") pod \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\" (UID: \"6e21e324-1ba4-4b7b-b45c-197db1c1e890\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342371 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/513c3ff8-3514-4d35-bbec-52fa4e2bd363-operator-scripts\") pod \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\" (UID: \"513c3ff8-3514-4d35-bbec-52fa4e2bd363\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.342412 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-operator-scripts\") pod \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\" (UID: \"c5c9071a-fe25-4ca2-bee6-38fea725d4d4\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.343490 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c5c9071a-fe25-4ca2-bee6-38fea725d4d4" (UID: "c5c9071a-fe25-4ca2-bee6-38fea725d4d4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.343985 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/423749f9-fd4e-4d6c-860e-e5d17269da04-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "423749f9-fd4e-4d6c-860e-e5d17269da04" (UID: "423749f9-fd4e-4d6c-860e-e5d17269da04"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.344056 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e21e324-1ba4-4b7b-b45c-197db1c1e890-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6e21e324-1ba4-4b7b-b45c-197db1c1e890" (UID: "6e21e324-1ba4-4b7b-b45c-197db1c1e890"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.344106 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/513c3ff8-3514-4d35-bbec-52fa4e2bd363-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "513c3ff8-3514-4d35-bbec-52fa4e2bd363" (UID: "513c3ff8-3514-4d35-bbec-52fa4e2bd363"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.347999 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-kube-api-access-wp7fp" (OuterVolumeSpecName: "kube-api-access-wp7fp") pod "c5c9071a-fe25-4ca2-bee6-38fea725d4d4" (UID: "c5c9071a-fe25-4ca2-bee6-38fea725d4d4"). InnerVolumeSpecName "kube-api-access-wp7fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.348120 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/513c3ff8-3514-4d35-bbec-52fa4e2bd363-kube-api-access-g5xps" (OuterVolumeSpecName: "kube-api-access-g5xps") pod "513c3ff8-3514-4d35-bbec-52fa4e2bd363" (UID: "513c3ff8-3514-4d35-bbec-52fa4e2bd363"). InnerVolumeSpecName "kube-api-access-g5xps". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.348680 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/423749f9-fd4e-4d6c-860e-e5d17269da04-kube-api-access-xlbqr" (OuterVolumeSpecName: "kube-api-access-xlbqr") pod "423749f9-fd4e-4d6c-860e-e5d17269da04" (UID: "423749f9-fd4e-4d6c-860e-e5d17269da04"). InnerVolumeSpecName "kube-api-access-xlbqr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.348804 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e21e324-1ba4-4b7b-b45c-197db1c1e890-kube-api-access-8b59b" (OuterVolumeSpecName: "kube-api-access-8b59b") pod "6e21e324-1ba4-4b7b-b45c-197db1c1e890" (UID: "6e21e324-1ba4-4b7b-b45c-197db1c1e890"). InnerVolumeSpecName "kube-api-access-8b59b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.444626 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rn7td\" (UniqueName: \"kubernetes.io/projected/6efa34a3-9747-4cb3-b829-f4d95b402668-kube-api-access-rn7td\") pod \"6efa34a3-9747-4cb3-b829-f4d95b402668\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.444879 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6efa34a3-9747-4cb3-b829-f4d95b402668-operator-scripts\") pod \"6efa34a3-9747-4cb3-b829-f4d95b402668\" (UID: \"6efa34a3-9747-4cb3-b829-f4d95b402668\") "
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445434 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e21e324-1ba4-4b7b-b45c-197db1c1e890-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445452 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5xps\" (UniqueName: \"kubernetes.io/projected/513c3ff8-3514-4d35-bbec-52fa4e2bd363-kube-api-access-g5xps\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445463 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlbqr\" (UniqueName: \"kubernetes.io/projected/423749f9-fd4e-4d6c-860e-e5d17269da04-kube-api-access-xlbqr\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445472 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp7fp\" (UniqueName: \"kubernetes.io/projected/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-kube-api-access-wp7fp\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445480 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/423749f9-fd4e-4d6c-860e-e5d17269da04-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445489 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b59b\" (UniqueName: \"kubernetes.io/projected/6e21e324-1ba4-4b7b-b45c-197db1c1e890-kube-api-access-8b59b\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445497 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/513c3ff8-3514-4d35-bbec-52fa4e2bd363-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445505 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5c9071a-fe25-4ca2-bee6-38fea725d4d4-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.445800 4854
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6efa34a3-9747-4cb3-b829-f4d95b402668-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6efa34a3-9747-4cb3-b829-f4d95b402668" (UID: "6efa34a3-9747-4cb3-b829-f4d95b402668"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.448015 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6efa34a3-9747-4cb3-b829-f4d95b402668-kube-api-access-rn7td" (OuterVolumeSpecName: "kube-api-access-rn7td") pod "6efa34a3-9747-4cb3-b829-f4d95b402668" (UID: "6efa34a3-9747-4cb3-b829-f4d95b402668"). InnerVolumeSpecName "kube-api-access-rn7td". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.548363 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rn7td\" (UniqueName: \"kubernetes.io/projected/6efa34a3-9747-4cb3-b829-f4d95b402668-kube-api-access-rn7td\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:55 crc kubenswrapper[4854]: I1125 09:56:55.548420 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6efa34a3-9747-4cb3-b829-f4d95b402668-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:56 crc kubenswrapper[4854]: I1125 09:56:56.335659 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-76e1-account-create-dv8dm" Nov 25 09:56:56 crc kubenswrapper[4854]: I1125 09:56:56.700312 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 09:56:57 crc kubenswrapper[4854]: I1125 09:56:57.601221 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:56:57 crc kubenswrapper[4854]: E1125 09:56:57.601776 4854 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:56:57 crc kubenswrapper[4854]: E1125 09:56:57.601791 4854 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:56:57 crc kubenswrapper[4854]: E1125 09:56:57.601832 4854 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift podName:eb6d8324-0633-4891-9a9c-f782e7cec247 nodeName:}" failed. No retries permitted until 2025-11-25 09:57:13.601819248 +0000 UTC m=+1239.454812624 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift") pod "swift-storage-0" (UID: "eb6d8324-0633-4891-9a9c-f782e7cec247") : configmap "swift-ring-files" not found Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.904365 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-7wk5r"] Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905057 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423749f9-fd4e-4d6c-860e-e5d17269da04" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905070 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="423749f9-fd4e-4d6c-860e-e5d17269da04" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905088 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5c9071a-fe25-4ca2-bee6-38fea725d4d4" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905095 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5c9071a-fe25-4ca2-bee6-38fea725d4d4" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905107 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905113 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905125 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerName="dnsmasq-dns" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905130 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerName="dnsmasq-dns" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905141 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dfd5d17-f81b-40bd-82a8-e52a2c1d8429" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905147 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dfd5d17-f81b-40bd-82a8-e52a2c1d8429" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905154 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerName="init" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905159 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerName="init" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905170 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c016ea-a928-4d8a-9936-ca262a75afa4" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905176 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c016ea-a928-4d8a-9936-ca262a75afa4" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905188 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6efa34a3-9747-4cb3-b829-f4d95b402668" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905193 4854 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6efa34a3-9747-4cb3-b829-f4d95b402668" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905207 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f308b0a-c5d0-446b-8489-7a8fcdaac38f" containerName="console" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905213 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f308b0a-c5d0-446b-8489-7a8fcdaac38f" containerName="console" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905225 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="513c3ff8-3514-4d35-bbec-52fa4e2bd363" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905231 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="513c3ff8-3514-4d35-bbec-52fa4e2bd363" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: E1125 09:56:58.905239 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e21e324-1ba4-4b7b-b45c-197db1c1e890" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905247 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e21e324-1ba4-4b7b-b45c-197db1c1e890" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905448 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cd76364-963f-4af7-83ef-cc73ab247e14" containerName="dnsmasq-dns" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905461 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dfd5d17-f81b-40bd-82a8-e52a2c1d8429" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905471 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e21e324-1ba4-4b7b-b45c-197db1c1e890" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905483 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="513c3ff8-3514-4d35-bbec-52fa4e2bd363" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905493 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="6efa34a3-9747-4cb3-b829-f4d95b402668" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905502 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="423749f9-fd4e-4d6c-860e-e5d17269da04" containerName="mariadb-database-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905510 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905523 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="76c016ea-a928-4d8a-9936-ca262a75afa4" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905537 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5c9071a-fe25-4ca2-bee6-38fea725d4d4" containerName="mariadb-account-create" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.905550 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f308b0a-c5d0-446b-8489-7a8fcdaac38f" containerName="console" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.913245 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.915225 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6k6vk" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.916814 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 09:56:58 crc kubenswrapper[4854]: I1125 09:56:58.928808 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-7wk5r"] Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.040237 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl6rq\" (UniqueName: \"kubernetes.io/projected/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-kube-api-access-jl6rq\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.040304 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-combined-ca-bundle\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.040361 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-config-data\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.040565 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-db-sync-config-data\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.143620 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-db-sync-config-data\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.145343 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl6rq\" (UniqueName: \"kubernetes.io/projected/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-kube-api-access-jl6rq\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.145417 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-combined-ca-bundle\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.145499 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-config-data\") pod 
\"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.155039 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-config-data\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.162282 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-combined-ca-bundle\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.165005 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-db-sync-config-data\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.179587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl6rq\" (UniqueName: \"kubernetes.io/projected/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-kube-api-access-jl6rq\") pod \"glance-db-sync-7wk5r\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " pod="openstack/glance-db-sync-7wk5r" Nov 25 09:56:59 crc kubenswrapper[4854]: I1125 09:56:59.265375 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-7wk5r" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.380735 4854 generic.go:334] "Generic (PLEG): container finished" podID="5a1d05ce-0c76-4823-a86f-004ea7655be9" containerID="4909e284025c019814d0dc52446700a46770988d57beca1d9f26b8979dae0854" exitCode=0 Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.380815 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-m8t44" event={"ID":"5a1d05ce-0c76-4823-a86f-004ea7655be9","Type":"ContainerDied","Data":"4909e284025c019814d0dc52446700a46770988d57beca1d9f26b8979dae0854"} Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.627848 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg"] Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.629172 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.642233 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg"] Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.779609 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr7qw\" (UniqueName: \"kubernetes.io/projected/9a460ca1-2f26-4dd4-8618-ac0c329b4689-kube-api-access-sr7qw\") pod \"mysqld-exporter-openstack-cell1-db-create-t2zxg\" (UID: \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.779834 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a460ca1-2f26-4dd4-8618-ac0c329b4689-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-t2zxg\" (UID: \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.837366 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-74f0-account-create-9v7gt"] Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.838833 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.841031 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.847536 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-74f0-account-create-9v7gt"] Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.882849 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a460ca1-2f26-4dd4-8618-ac0c329b4689-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-t2zxg\" (UID: \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.883040 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr7qw\" (UniqueName: \"kubernetes.io/projected/9a460ca1-2f26-4dd4-8618-ac0c329b4689-kube-api-access-sr7qw\") pod \"mysqld-exporter-openstack-cell1-db-create-t2zxg\" (UID: \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.884848 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a460ca1-2f26-4dd4-8618-ac0c329b4689-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-t2zxg\" (UID: \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.910753 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr7qw\" (UniqueName: \"kubernetes.io/projected/9a460ca1-2f26-4dd4-8618-ac0c329b4689-kube-api-access-sr7qw\") pod \"mysqld-exporter-openstack-cell1-db-create-t2zxg\" (UID: 
\"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.956311 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.984954 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czjns\" (UniqueName: \"kubernetes.io/projected/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-kube-api-access-czjns\") pod \"mysqld-exporter-74f0-account-create-9v7gt\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:00 crc kubenswrapper[4854]: I1125 09:57:00.985044 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-operator-scripts\") pod \"mysqld-exporter-74f0-account-create-9v7gt\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.089044 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-operator-scripts\") pod \"mysqld-exporter-74f0-account-create-9v7gt\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.089802 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czjns\" (UniqueName: \"kubernetes.io/projected/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-kube-api-access-czjns\") pod \"mysqld-exporter-74f0-account-create-9v7gt\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.090456 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-operator-scripts\") pod \"mysqld-exporter-74f0-account-create-9v7gt\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.107978 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czjns\" (UniqueName: \"kubernetes.io/projected/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-kube-api-access-czjns\") pod \"mysqld-exporter-74f0-account-create-9v7gt\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.164829 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.606959 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg"] Nov 25 09:57:01 crc kubenswrapper[4854]: W1125 09:57:01.609179 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a460ca1_2f26_4dd4_8618_ac0c329b4689.slice/crio-9d808caba75b4e5315d6fea48b7972769c67ee0c3f6dd69aac5b4cb810d42bc0 WatchSource:0}: Error finding container 9d808caba75b4e5315d6fea48b7972769c67ee0c3f6dd69aac5b4cb810d42bc0: Status 404 returned error can't find the container with id 9d808caba75b4e5315d6fea48b7972769c67ee0c3f6dd69aac5b4cb810d42bc0 Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.732113 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-7wk5r"] Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.819183 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-74f0-account-create-9v7gt"] Nov 25 09:57:01 crc kubenswrapper[4854]: W1125 09:57:01.820870 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7e6380a_24e5_4ed2_aba6_e4b5a0adc0bc.slice/crio-2b293635fc1c021d61dc8cb99f70e636fe8bb2a64c3905a66112b7063c4a7982 WatchSource:0}: Error finding container 2b293635fc1c021d61dc8cb99f70e636fe8bb2a64c3905a66112b7063c4a7982: Status 404 returned error can't find the container with id 2b293635fc1c021d61dc8cb99f70e636fe8bb2a64c3905a66112b7063c4a7982 Nov 25 09:57:01 crc kubenswrapper[4854]: I1125 09:57:01.972028 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.138527 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5a1d05ce-0c76-4823-a86f-004ea7655be9-etc-swift\") pod \"5a1d05ce-0c76-4823-a86f-004ea7655be9\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.139021 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-combined-ca-bundle\") pod \"5a1d05ce-0c76-4823-a86f-004ea7655be9\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.139169 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-dispersionconf\") pod \"5a1d05ce-0c76-4823-a86f-004ea7655be9\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.139261 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-swiftconf\") pod \"5a1d05ce-0c76-4823-a86f-004ea7655be9\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.139383 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2wrc\" (UniqueName: \"kubernetes.io/projected/5a1d05ce-0c76-4823-a86f-004ea7655be9-kube-api-access-r2wrc\") pod \"5a1d05ce-0c76-4823-a86f-004ea7655be9\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.139525 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-ring-data-devices\") pod \"5a1d05ce-0c76-4823-a86f-004ea7655be9\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.139607 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-scripts\") pod \"5a1d05ce-0c76-4823-a86f-004ea7655be9\" (UID: \"5a1d05ce-0c76-4823-a86f-004ea7655be9\") " Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.140185 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a1d05ce-0c76-4823-a86f-004ea7655be9-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5a1d05ce-0c76-4823-a86f-004ea7655be9" (UID: "5a1d05ce-0c76-4823-a86f-004ea7655be9"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.141194 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "5a1d05ce-0c76-4823-a86f-004ea7655be9" (UID: "5a1d05ce-0c76-4823-a86f-004ea7655be9"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.154986 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a1d05ce-0c76-4823-a86f-004ea7655be9-kube-api-access-r2wrc" (OuterVolumeSpecName: "kube-api-access-r2wrc") pod "5a1d05ce-0c76-4823-a86f-004ea7655be9" (UID: "5a1d05ce-0c76-4823-a86f-004ea7655be9"). InnerVolumeSpecName "kube-api-access-r2wrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.174928 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "5a1d05ce-0c76-4823-a86f-004ea7655be9" (UID: "5a1d05ce-0c76-4823-a86f-004ea7655be9"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.186307 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-scripts" (OuterVolumeSpecName: "scripts") pod "5a1d05ce-0c76-4823-a86f-004ea7655be9" (UID: "5a1d05ce-0c76-4823-a86f-004ea7655be9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.205489 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "5a1d05ce-0c76-4823-a86f-004ea7655be9" (UID: "5a1d05ce-0c76-4823-a86f-004ea7655be9"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.205575 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a1d05ce-0c76-4823-a86f-004ea7655be9" (UID: "5a1d05ce-0c76-4823-a86f-004ea7655be9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.244659 4854 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.244884 4854 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.244972 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2wrc\" (UniqueName: \"kubernetes.io/projected/5a1d05ce-0c76-4823-a86f-004ea7655be9-kube-api-access-r2wrc\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.245027 4854 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.245084 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a1d05ce-0c76-4823-a86f-004ea7655be9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.245135 4854 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5a1d05ce-0c76-4823-a86f-004ea7655be9-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.245183 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a1d05ce-0c76-4823-a86f-004ea7655be9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.453137 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerStarted","Data":"e14583c42ce5d5a2261a9b592f1cdc639741839ca7d9929733691ee983791aa3"} Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.455948 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7wk5r" event={"ID":"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e","Type":"ContainerStarted","Data":"889b67a15505596efd60e54f09b441ed669b5e3caaff256c309fa9aa3e7d2423"} Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.458144 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-m8t44" event={"ID":"5a1d05ce-0c76-4823-a86f-004ea7655be9","Type":"ContainerDied","Data":"b758832bfc0fb713e514e67547b7b3fa0d9401465d5f9be008227942ec57aac2"} Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.458174 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-m8t44" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.458179 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b758832bfc0fb713e514e67547b7b3fa0d9401465d5f9be008227942ec57aac2" Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.464869 4854 generic.go:334] "Generic (PLEG): container finished" podID="f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc" containerID="50e9a62c0abbe5c38a739be7b8cf7f8e187369ad98c2621efe205c6bf417c488" exitCode=0 Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.464932 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" event={"ID":"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc","Type":"ContainerDied","Data":"50e9a62c0abbe5c38a739be7b8cf7f8e187369ad98c2621efe205c6bf417c488"} Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.464957 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" event={"ID":"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc","Type":"ContainerStarted","Data":"2b293635fc1c021d61dc8cb99f70e636fe8bb2a64c3905a66112b7063c4a7982"} Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.473051 4854 generic.go:334] "Generic (PLEG): container finished" podID="9a460ca1-2f26-4dd4-8618-ac0c329b4689" containerID="332e57882fa8752c800c3eefd29bd426a505503148fb91314bcf43a6d4e8ac07" exitCode=0 Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.473084 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" event={"ID":"9a460ca1-2f26-4dd4-8618-ac0c329b4689","Type":"ContainerDied","Data":"332e57882fa8752c800c3eefd29bd426a505503148fb91314bcf43a6d4e8ac07"} Nov 25 09:57:02 crc kubenswrapper[4854]: I1125 09:57:02.473107 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" event={"ID":"9a460ca1-2f26-4dd4-8618-ac0c329b4689","Type":"ContainerStarted","Data":"9d808caba75b4e5315d6fea48b7972769c67ee0c3f6dd69aac5b4cb810d42bc0"} Nov 25 09:57:03 crc kubenswrapper[4854]: I1125 09:57:03.353942 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-7dl26" podUID="04573f28-a6e2-46ca-8a02-a2265c5d68e9" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:57:03 crc kubenswrapper[4854]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:57:03 crc kubenswrapper[4854]: > Nov 25 09:57:03 crc kubenswrapper[4854]: I1125 09:57:03.504540 4854 generic.go:334] "Generic (PLEG): container finished" podID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerID="d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec" exitCode=0 Nov 25 09:57:03 crc kubenswrapper[4854]: I1125 09:57:03.504785 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6894f0be-f53f-401b-8707-4cc0cfd020dc","Type":"ContainerDied","Data":"d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec"} Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.029250 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.067357 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.109168 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-operator-scripts\") pod \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.110167 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc" (UID: "f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.111289 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czjns\" (UniqueName: \"kubernetes.io/projected/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-kube-api-access-czjns\") pod \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\" (UID: \"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc\") " Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.111446 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sr7qw\" (UniqueName: \"kubernetes.io/projected/9a460ca1-2f26-4dd4-8618-ac0c329b4689-kube-api-access-sr7qw\") pod \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\" (UID: \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.111643 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a460ca1-2f26-4dd4-8618-ac0c329b4689-operator-scripts\") pod \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\" (UID: \"9a460ca1-2f26-4dd4-8618-ac0c329b4689\") " Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.115192 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a460ca1-2f26-4dd4-8618-ac0c329b4689-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9a460ca1-2f26-4dd4-8618-ac0c329b4689" (UID: "9a460ca1-2f26-4dd4-8618-ac0c329b4689"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.118062 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a460ca1-2f26-4dd4-8618-ac0c329b4689-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.118760 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.171275 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-kube-api-access-czjns" (OuterVolumeSpecName: "kube-api-access-czjns") pod "f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc" (UID: "f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc"). InnerVolumeSpecName "kube-api-access-czjns". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.171365 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a460ca1-2f26-4dd4-8618-ac0c329b4689-kube-api-access-sr7qw" (OuterVolumeSpecName: "kube-api-access-sr7qw") pod "9a460ca1-2f26-4dd4-8618-ac0c329b4689" (UID: "9a460ca1-2f26-4dd4-8618-ac0c329b4689"). InnerVolumeSpecName "kube-api-access-sr7qw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.220983 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czjns\" (UniqueName: \"kubernetes.io/projected/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc-kube-api-access-czjns\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.221023 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sr7qw\" (UniqueName: \"kubernetes.io/projected/9a460ca1-2f26-4dd4-8618-ac0c329b4689-kube-api-access-sr7qw\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.515462 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6894f0be-f53f-401b-8707-4cc0cfd020dc","Type":"ContainerStarted","Data":"1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a"} Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.515869 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.518040 4854 generic.go:334] "Generic (PLEG): container finished" podID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerID="1d56aa5fdd0201276ecfee6387fdafa67cd3f4cd93571921d3582c84f66d1f16" exitCode=0 Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.518086 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"dc586641-37b8-4b9b-8479-a3c552bec71d","Type":"ContainerDied","Data":"1d56aa5fdd0201276ecfee6387fdafa67cd3f4cd93571921d3582c84f66d1f16"} Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.520124 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" event={"ID":"f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc","Type":"ContainerDied","Data":"2b293635fc1c021d61dc8cb99f70e636fe8bb2a64c3905a66112b7063c4a7982"} Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.520147 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b293635fc1c021d61dc8cb99f70e636fe8bb2a64c3905a66112b7063c4a7982" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.520210 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-74f0-account-create-9v7gt" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.531728 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" event={"ID":"9a460ca1-2f26-4dd4-8618-ac0c329b4689","Type":"ContainerDied","Data":"9d808caba75b4e5315d6fea48b7972769c67ee0c3f6dd69aac5b4cb810d42bc0"} Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.531943 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.532324 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d808caba75b4e5315d6fea48b7972769c67ee0c3f6dd69aac5b4cb810d42bc0" Nov 25 09:57:04 crc kubenswrapper[4854]: I1125 09:57:04.548000 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=54.993956558 podStartE2EDuration="1m11.547974661s" podCreationTimestamp="2025-11-25 09:55:53 +0000 UTC" firstStartedPulling="2025-11-25 09:56:10.632362967 +0000 UTC m=+1176.485356343" lastFinishedPulling="2025-11-25 09:56:27.18638106 +0000 UTC m=+1193.039374446" observedRunningTime="2025-11-25 09:57:04.544439935 +0000 UTC m=+1230.397433331" watchObservedRunningTime="2025-11-25 09:57:04.547974661 +0000 UTC m=+1230.400968057" Nov 25 09:57:05 crc kubenswrapper[4854]: I1125 09:57:05.544028 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"dc586641-37b8-4b9b-8479-a3c552bec71d","Type":"ContainerStarted","Data":"8d6f89d7d6fbc896c63253f9ef919d0be6fde4db080cb205d97fa50cdbb02239"} Nov 25 09:57:05 crc kubenswrapper[4854]: I1125 09:57:05.544480 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Nov 25 09:57:05 crc kubenswrapper[4854]: I1125 09:57:05.546808 4854 generic.go:334] "Generic (PLEG): container finished" podID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerID="18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e" exitCode=0 Nov 25 09:57:05 crc kubenswrapper[4854]: I1125 09:57:05.546874 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"c24229dd-3c9c-47b6-8080-a1d51e0e6868","Type":"ContainerDied","Data":"18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e"} Nov 25 09:57:05 crc kubenswrapper[4854]: I1125 09:57:05.550236 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerStarted","Data":"9622afa4b2cbb87f9b6fa5ca3f47e7bc5b92acc2a407dc54fa80c47312a3e647"} Nov 25 09:57:05 crc kubenswrapper[4854]: I1125 09:57:05.612573 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=55.231758753 podStartE2EDuration="1m12.612557776s" podCreationTimestamp="2025-11-25 09:55:53 +0000 UTC" firstStartedPulling="2025-11-25 09:56:10.581943004 +0000 UTC m=+1176.434936380" lastFinishedPulling="2025-11-25 09:56:27.962742027 +0000 UTC m=+1193.815735403" observedRunningTime="2025-11-25 09:57:05.583061096 +0000 UTC m=+1231.436054492" watchObservedRunningTime="2025-11-25 09:57:05.612557776 +0000 UTC m=+1231.465551152" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.047988 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 09:57:06 crc kubenswrapper[4854]: E1125 09:57:06.048382 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a460ca1-2f26-4dd4-8618-ac0c329b4689" containerName="mariadb-database-create" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.048398 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a460ca1-2f26-4dd4-8618-ac0c329b4689" containerName="mariadb-database-create" Nov 25 09:57:06 crc kubenswrapper[4854]: E1125 09:57:06.048422 4854 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc" containerName="mariadb-account-create" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.048429 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc" containerName="mariadb-account-create" Nov 25 09:57:06 crc kubenswrapper[4854]: E1125 09:57:06.048439 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a1d05ce-0c76-4823-a86f-004ea7655be9" containerName="swift-ring-rebalance" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.048445 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a1d05ce-0c76-4823-a86f-004ea7655be9" containerName="swift-ring-rebalance" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.048653 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc" containerName="mariadb-account-create" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.048689 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a1d05ce-0c76-4823-a86f-004ea7655be9" containerName="swift-ring-rebalance" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.048701 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a460ca1-2f26-4dd4-8618-ac0c329b4689" containerName="mariadb-database-create" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.049380 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.053167 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.067109 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.166727 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bvsj\" (UniqueName: \"kubernetes.io/projected/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-kube-api-access-8bvsj\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.167194 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.167632 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-config-data\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.270267 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bvsj\" (UniqueName: \"kubernetes.io/projected/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-kube-api-access-8bvsj\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.270366 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.270534 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-config-data\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.280834 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.295289 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-config-data\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.296604 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bvsj\" (UniqueName: \"kubernetes.io/projected/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-kube-api-access-8bvsj\") pod \"mysqld-exporter-0\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.365506 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.564857 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"c24229dd-3c9c-47b6-8080-a1d51e0e6868","Type":"ContainerStarted","Data":"b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b"} Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.565889 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.607435 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=56.639664044 podStartE2EDuration="1m13.607416837s" podCreationTimestamp="2025-11-25 09:55:53 +0000 UTC" firstStartedPulling="2025-11-25 09:56:10.596868723 +0000 UTC m=+1176.449862099" lastFinishedPulling="2025-11-25 09:56:27.564621506 +0000 UTC m=+1193.417614892" observedRunningTime="2025-11-25 09:57:06.593598668 +0000 UTC m=+1232.446592054" watchObservedRunningTime="2025-11-25 09:57:06.607416837 +0000 UTC m=+1232.460410203" Nov 25 09:57:06 crc kubenswrapper[4854]: I1125 09:57:06.923703 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 09:57:06 crc kubenswrapper[4854]: W1125 09:57:06.927713 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a1342b2_9a0f_42fe_85f4_d706eb2587d4.slice/crio-a6710b883700dd6b7b4ce3f4d9543f074cd0a42930bb7c7a1f0ca20c70859ece WatchSource:0}: Error finding container a6710b883700dd6b7b4ce3f4d9543f074cd0a42930bb7c7a1f0ca20c70859ece: Status 404 returned error can't find the container with id 
a6710b883700dd6b7b4ce3f4d9543f074cd0a42930bb7c7a1f0ca20c70859ece Nov 25 09:57:07 crc kubenswrapper[4854]: I1125 09:57:07.579510 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"8a1342b2-9a0f-42fe-85f4-d706eb2587d4","Type":"ContainerStarted","Data":"a6710b883700dd6b7b4ce3f4d9543f074cd0a42930bb7c7a1f0ca20c70859ece"} Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.366789 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-7dl26" podUID="04573f28-a6e2-46ca-8a02-a2265c5d68e9" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:57:08 crc kubenswrapper[4854]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:57:08 crc kubenswrapper[4854]: > Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.382342 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.399192 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2t2j4" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.670886 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-7dl26-config-xhzrk"] Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.672236 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.682624 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.702510 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-7dl26-config-xhzrk"] Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.833835 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.833882 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jclzn\" (UniqueName: \"kubernetes.io/projected/5737473b-6084-41b0-8cfd-cb590252b6b4-kube-api-access-jclzn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.833935 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-scripts\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.834112 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run-ovn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc 
kubenswrapper[4854]: I1125 09:57:08.834260 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-additional-scripts\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.834440 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-log-ovn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.938004 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.938059 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jclzn\" (UniqueName: \"kubernetes.io/projected/5737473b-6084-41b0-8cfd-cb590252b6b4-kube-api-access-jclzn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.938114 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-scripts\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.938165 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run-ovn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.938206 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-additional-scripts\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.938266 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-log-ovn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.938749 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-log-ovn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " 
pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.939085 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.939155 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run-ovn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.940933 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-additional-scripts\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.941326 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-scripts\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:08 crc kubenswrapper[4854]: I1125 09:57:08.956276 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jclzn\" (UniqueName: \"kubernetes.io/projected/5737473b-6084-41b0-8cfd-cb590252b6b4-kube-api-access-jclzn\") pod \"ovn-controller-7dl26-config-xhzrk\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:09 crc kubenswrapper[4854]: I1125 09:57:09.008232 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:12 crc kubenswrapper[4854]: I1125 09:57:12.634416 4854 generic.go:334] "Generic (PLEG): container finished" podID="575fe5df-5a76-4633-9688-3997a708f3f4" containerID="a3871b7ff59f7e655f575c3f64e51e999b3cdec859edb86a4be2741b96d9f09f" exitCode=0 Nov 25 09:57:12 crc kubenswrapper[4854]: I1125 09:57:12.634501 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"575fe5df-5a76-4633-9688-3997a708f3f4","Type":"ContainerDied","Data":"a3871b7ff59f7e655f575c3f64e51e999b3cdec859edb86a4be2741b96d9f09f"} Nov 25 09:57:13 crc kubenswrapper[4854]: I1125 09:57:13.378344 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-7dl26" podUID="04573f28-a6e2-46ca-8a02-a2265c5d68e9" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:57:13 crc kubenswrapper[4854]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:57:13 crc kubenswrapper[4854]: > Nov 25 09:57:13 crc kubenswrapper[4854]: I1125 09:57:13.637784 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:57:13 crc kubenswrapper[4854]: I1125 09:57:13.663705 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/eb6d8324-0633-4891-9a9c-f782e7cec247-etc-swift\") pod \"swift-storage-0\" (UID: \"eb6d8324-0633-4891-9a9c-f782e7cec247\") " pod="openstack/swift-storage-0" Nov 25 09:57:13 crc kubenswrapper[4854]: I1125 09:57:13.675339 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:57:14 crc kubenswrapper[4854]: I1125 09:57:14.839416 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Nov 25 09:57:14 crc kubenswrapper[4854]: I1125 09:57:14.850180 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.439919 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-7dl26-config-xhzrk"] Nov 25 09:57:18 crc kubenswrapper[4854]: W1125 09:57:18.448694 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5737473b_6084_41b0_8cfd_cb590252b6b4.slice/crio-6cf5028040ba3b18a58e92029f1f5bfda514331113f736d6c6c79587304315b7 WatchSource:0}: Error finding container 6cf5028040ba3b18a58e92029f1f5bfda514331113f736d6c6c79587304315b7: Status 404 returned error can't find the container with id 6cf5028040ba3b18a58e92029f1f5bfda514331113f736d6c6c79587304315b7 Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.492860 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-7dl26" podUID="04573f28-a6e2-46ca-8a02-a2265c5d68e9" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:57:18 crc kubenswrapper[4854]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:57:18 crc kubenswrapper[4854]: > Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.704040 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26-config-xhzrk" event={"ID":"5737473b-6084-41b0-8cfd-cb590252b6b4","Type":"ContainerStarted","Data":"6cf5028040ba3b18a58e92029f1f5bfda514331113f736d6c6c79587304315b7"} Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.706450 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"8a1342b2-9a0f-42fe-85f4-d706eb2587d4","Type":"ContainerStarted","Data":"847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c"} Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.711245 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.716099 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerStarted","Data":"80dac60a0c43639e6b5f2caa74912607cd28500ca7d30e1213e8689c204c0396"} Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.721952 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"575fe5df-5a76-4633-9688-3997a708f3f4","Type":"ContainerStarted","Data":"41e467847c880fe581389b94e5ceda22ae9ee9e03a9dbd9ef358e10e52b6dca3"} Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.723044 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.724300 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/mysqld-exporter-0" podStartSLOduration=1.630793878 podStartE2EDuration="12.724280832s" podCreationTimestamp="2025-11-25 09:57:06 +0000 UTC" firstStartedPulling="2025-11-25 09:57:06.929977906 +0000 UTC m=+1232.782971282" lastFinishedPulling="2025-11-25 09:57:18.02346486 +0000 UTC m=+1243.876458236" observedRunningTime="2025-11-25 09:57:18.723325966 +0000 UTC m=+1244.576319362" watchObservedRunningTime="2025-11-25 09:57:18.724280832 +0000 UTC m=+1244.577274208" Nov 25 09:57:18 crc kubenswrapper[4854]: W1125 09:57:18.726821 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb6d8324_0633_4891_9a9c_f782e7cec247.slice/crio-cd721820b8414fdf6526aad477efcddd9cc3925462bbc2dc5088e07c25447940 WatchSource:0}: Error finding container cd721820b8414fdf6526aad477efcddd9cc3925462bbc2dc5088e07c25447940: Status 404 returned error can't find the container with id cd721820b8414fdf6526aad477efcddd9cc3925462bbc2dc5088e07c25447940 Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.755124 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.768082 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371951.086714 podStartE2EDuration="1m25.768061528s" podCreationTimestamp="2025-11-25 09:55:53 +0000 UTC" firstStartedPulling="2025-11-25 09:56:09.346600625 +0000 UTC m=+1175.199594001" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:18.761414375 +0000 UTC m=+1244.614407751" watchObservedRunningTime="2025-11-25 09:57:18.768061528 +0000 UTC m=+1244.621054894" Nov 25 09:57:18 crc kubenswrapper[4854]: I1125 09:57:18.792301 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=12.267293923 podStartE2EDuration="1m18.792282985s" podCreationTimestamp="2025-11-25 09:56:00 +0000 UTC" firstStartedPulling="2025-11-25 09:56:11.498457317 +0000 UTC m=+1177.351450693" lastFinishedPulling="2025-11-25 09:57:18.023446389 +0000 UTC m=+1243.876439755" observedRunningTime="2025-11-25 09:57:18.791056061 +0000 UTC m=+1244.644049437" watchObservedRunningTime="2025-11-25 09:57:18.792282985 +0000 UTC m=+1244.645276361" Nov 25 09:57:19 crc kubenswrapper[4854]: I1125 09:57:19.747845 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7wk5r" event={"ID":"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e","Type":"ContainerStarted","Data":"86aee007c08258f16d7873af7f2909e890cd1f2fa8bc7a60f57eea84c1b7ff46"} Nov 25 09:57:19 crc kubenswrapper[4854]: I1125 09:57:19.751814 4854 generic.go:334] "Generic (PLEG): container finished" podID="5737473b-6084-41b0-8cfd-cb590252b6b4" containerID="8a2ba695aa65f5595c082832553b14e212611b74d1bd8c5370c264d95f5a39db" exitCode=0 Nov 25 09:57:19 crc kubenswrapper[4854]: I1125 09:57:19.751852 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26-config-xhzrk" event={"ID":"5737473b-6084-41b0-8cfd-cb590252b6b4","Type":"ContainerDied","Data":"8a2ba695aa65f5595c082832553b14e212611b74d1bd8c5370c264d95f5a39db"} Nov 25 09:57:19 crc kubenswrapper[4854]: I1125 09:57:19.754657 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"cd721820b8414fdf6526aad477efcddd9cc3925462bbc2dc5088e07c25447940"} Nov 25 09:57:19 crc kubenswrapper[4854]: I1125 09:57:19.774811 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-7wk5r" podStartSLOduration=5.454485706 podStartE2EDuration="21.774792382s" podCreationTimestamp="2025-11-25 09:56:58 +0000 UTC" firstStartedPulling="2025-11-25 09:57:01.768535114 +0000 UTC m=+1227.621528490" lastFinishedPulling="2025-11-25 09:57:18.08884179 +0000 UTC m=+1243.941835166" observedRunningTime="2025-11-25 09:57:19.769637661 +0000 UTC m=+1245.622631057" watchObservedRunningTime="2025-11-25 09:57:19.774792382 +0000 UTC m=+1245.627785758" Nov 25 09:57:20 crc kubenswrapper[4854]: I1125 09:57:20.767844 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"eb17691255f648b4c4c8992e7ab47c4402143e8bc4dcf9d1229629360453bc00"} Nov 25 09:57:20 crc kubenswrapper[4854]: I1125 09:57:20.768389 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"e933f6b7d25e1070cdf363ea77cd947bc116415f1f528af566a58e946a3ebc89"} Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.200622 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.240276 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-scripts\") pod \"5737473b-6084-41b0-8cfd-cb590252b6b4\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.240327 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jclzn\" (UniqueName: \"kubernetes.io/projected/5737473b-6084-41b0-8cfd-cb590252b6b4-kube-api-access-jclzn\") pod \"5737473b-6084-41b0-8cfd-cb590252b6b4\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.240433 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-log-ovn\") pod \"5737473b-6084-41b0-8cfd-cb590252b6b4\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.240490 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run\") pod \"5737473b-6084-41b0-8cfd-cb590252b6b4\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.240586 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-additional-scripts\") pod \"5737473b-6084-41b0-8cfd-cb590252b6b4\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.240742 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run-ovn\") pod \"5737473b-6084-41b0-8cfd-cb590252b6b4\" (UID: \"5737473b-6084-41b0-8cfd-cb590252b6b4\") " Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.241235 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5737473b-6084-41b0-8cfd-cb590252b6b4" (UID: "5737473b-6084-41b0-8cfd-cb590252b6b4"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.241803 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-scripts" (OuterVolumeSpecName: "scripts") pod "5737473b-6084-41b0-8cfd-cb590252b6b4" (UID: "5737473b-6084-41b0-8cfd-cb590252b6b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.241878 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run" (OuterVolumeSpecName: "var-run") pod "5737473b-6084-41b0-8cfd-cb590252b6b4" (UID: "5737473b-6084-41b0-8cfd-cb590252b6b4"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.241968 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5737473b-6084-41b0-8cfd-cb590252b6b4" (UID: "5737473b-6084-41b0-8cfd-cb590252b6b4"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.242371 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5737473b-6084-41b0-8cfd-cb590252b6b4" (UID: "5737473b-6084-41b0-8cfd-cb590252b6b4"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.274936 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5737473b-6084-41b0-8cfd-cb590252b6b4-kube-api-access-jclzn" (OuterVolumeSpecName: "kube-api-access-jclzn") pod "5737473b-6084-41b0-8cfd-cb590252b6b4" (UID: "5737473b-6084-41b0-8cfd-cb590252b6b4"). InnerVolumeSpecName "kube-api-access-jclzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.343548 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.343588 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jclzn\" (UniqueName: \"kubernetes.io/projected/5737473b-6084-41b0-8cfd-cb590252b6b4-kube-api-access-jclzn\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.343600 4854 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.343610 4854 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.343622 4854 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5737473b-6084-41b0-8cfd-cb590252b6b4-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.343633 4854 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5737473b-6084-41b0-8cfd-cb590252b6b4-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.782435 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"2acfac8e20fe9556ba5e9ac5b0dabafb8fb1f8cdd3bbcb33582065e40787be7c"} Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.782733 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"0803e45a1def07e17b794d5cfc2d21c760cc9127fda0ad9ea67c8dd257487bd6"} Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.783951 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26-config-xhzrk" event={"ID":"5737473b-6084-41b0-8cfd-cb590252b6b4","Type":"ContainerDied","Data":"6cf5028040ba3b18a58e92029f1f5bfda514331113f736d6c6c79587304315b7"} Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.783975 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cf5028040ba3b18a58e92029f1f5bfda514331113f736d6c6c79587304315b7" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.784038 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-7dl26-config-xhzrk" Nov 25 09:57:21 crc kubenswrapper[4854]: I1125 09:57:21.915833 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.339464 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-7dl26-config-xhzrk"] Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.355169 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-7dl26-config-xhzrk"] Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.443554 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-7dl26-config-stkvp"] Nov 25 09:57:22 crc kubenswrapper[4854]: E1125 09:57:22.444110 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5737473b-6084-41b0-8cfd-cb590252b6b4" containerName="ovn-config" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.444139 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5737473b-6084-41b0-8cfd-cb590252b6b4" containerName="ovn-config" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.444403 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="5737473b-6084-41b0-8cfd-cb590252b6b4" containerName="ovn-config" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.445353 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.447665 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.455200 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-7dl26-config-stkvp"] Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.508036 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-additional-scripts\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.513804 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.513987 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-scripts\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.514104 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9428c\" (UniqueName: \"kubernetes.io/projected/0b7a407a-c56f-46eb-bec0-b66542205a02-kube-api-access-9428c\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 
crc kubenswrapper[4854]: I1125 09:57:22.524123 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-log-ovn\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.524655 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run-ovn\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.626829 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9428c\" (UniqueName: \"kubernetes.io/projected/0b7a407a-c56f-46eb-bec0-b66542205a02-kube-api-access-9428c\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.627264 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-log-ovn\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.627411 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run-ovn\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.628504 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-additional-scripts\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.627610 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run-ovn\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.627538 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-log-ovn\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.629152 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " 
pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.629249 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-scripts\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.629329 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-additional-scripts\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.629423 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.630998 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-scripts\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.652444 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9428c\" (UniqueName: \"kubernetes.io/projected/0b7a407a-c56f-46eb-bec0-b66542205a02-kube-api-access-9428c\") pod \"ovn-controller-7dl26-config-stkvp\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:22 crc kubenswrapper[4854]: I1125 09:57:22.809434 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:23 crc kubenswrapper[4854]: I1125 09:57:23.026881 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5737473b-6084-41b0-8cfd-cb590252b6b4" path="/var/lib/kubelet/pods/5737473b-6084-41b0-8cfd-cb590252b6b4/volumes" Nov 25 09:57:23 crc kubenswrapper[4854]: I1125 09:57:23.360940 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-7dl26" Nov 25 09:57:23 crc kubenswrapper[4854]: I1125 09:57:23.579341 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-7dl26-config-stkvp"] Nov 25 09:57:23 crc kubenswrapper[4854]: W1125 09:57:23.599519 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b7a407a_c56f_46eb_bec0_b66542205a02.slice/crio-7e213c1bc1752d720bb4556778027fd52183597b2bfe35be9ea4218c9c827564 WatchSource:0}: Error finding container 7e213c1bc1752d720bb4556778027fd52183597b2bfe35be9ea4218c9c827564: Status 404 returned error can't find the container with id 7e213c1bc1752d720bb4556778027fd52183597b2bfe35be9ea4218c9c827564 Nov 25 09:57:23 crc kubenswrapper[4854]: I1125 09:57:23.808531 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"6106b6f5bf0571845478824f5ac860383e038f5dd90ae387a3ce97af9ea33ca9"} Nov 25 09:57:23 crc kubenswrapper[4854]: I1125 09:57:23.808882 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"614db23acfe1316a6f7ee639df6b54943f30372ce5f84e426cb773373a254555"} Nov 25 09:57:23 crc kubenswrapper[4854]: I1125 09:57:23.810862 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26-config-stkvp" event={"ID":"0b7a407a-c56f-46eb-bec0-b66542205a02","Type":"ContainerStarted","Data":"7e213c1bc1752d720bb4556778027fd52183597b2bfe35be9ea4218c9c827564"} Nov 25 09:57:24 crc kubenswrapper[4854]: I1125 09:57:24.823245 4854 generic.go:334] "Generic (PLEG): container finished" podID="0b7a407a-c56f-46eb-bec0-b66542205a02" containerID="fe24d6444f38d01e85737fd96dfd356629d78a085cd6fce42d85db11a1f92bbe" exitCode=0 Nov 25 09:57:24 crc kubenswrapper[4854]: I1125 09:57:24.824243 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26-config-stkvp" event={"ID":"0b7a407a-c56f-46eb-bec0-b66542205a02","Type":"ContainerDied","Data":"fe24d6444f38d01e85737fd96dfd356629d78a085cd6fce42d85db11a1f92bbe"} Nov 25 09:57:24 crc kubenswrapper[4854]: I1125 09:57:24.828576 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"7dc1274ecf7bc9fa1396aee8c8074144cc21597d0c4bd1f4131bd00c1d90306c"} Nov 25 09:57:24 crc kubenswrapper[4854]: I1125 09:57:24.828852 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"d63e30102d43dad62cd33953c5c45545d363a87c2eaad70aacf84faad09df412"} Nov 25 09:57:24 crc kubenswrapper[4854]: I1125 09:57:24.840869 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 09:57:24 crc kubenswrapper[4854]: I1125 09:57:24.848160 4854 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Nov 25 09:57:24 crc kubenswrapper[4854]: I1125 09:57:24.872017 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused" Nov 25 09:57:25 crc kubenswrapper[4854]: I1125 09:57:25.842575 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"0ca77ffde629edb3a41d2c8e6f3487eac2b147502f2c66ead22e4b8633b7df00"} Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.257119 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.312511 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run-ovn\") pod \"0b7a407a-c56f-46eb-bec0-b66542205a02\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.312663 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-additional-scripts\") pod \"0b7a407a-c56f-46eb-bec0-b66542205a02\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.312755 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9428c\" (UniqueName: \"kubernetes.io/projected/0b7a407a-c56f-46eb-bec0-b66542205a02-kube-api-access-9428c\") pod \"0b7a407a-c56f-46eb-bec0-b66542205a02\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.312797 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-scripts\") pod \"0b7a407a-c56f-46eb-bec0-b66542205a02\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.312828 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-log-ovn\") pod \"0b7a407a-c56f-46eb-bec0-b66542205a02\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.312867 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run\") pod \"0b7a407a-c56f-46eb-bec0-b66542205a02\" (UID: \"0b7a407a-c56f-46eb-bec0-b66542205a02\") " Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.313494 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run" (OuterVolumeSpecName: "var-run") pod "0b7a407a-c56f-46eb-bec0-b66542205a02" (UID: "0b7a407a-c56f-46eb-bec0-b66542205a02"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.313535 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "0b7a407a-c56f-46eb-bec0-b66542205a02" (UID: "0b7a407a-c56f-46eb-bec0-b66542205a02"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.314321 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "0b7a407a-c56f-46eb-bec0-b66542205a02" (UID: "0b7a407a-c56f-46eb-bec0-b66542205a02"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.315562 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-scripts" (OuterVolumeSpecName: "scripts") pod "0b7a407a-c56f-46eb-bec0-b66542205a02" (UID: "0b7a407a-c56f-46eb-bec0-b66542205a02"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.315611 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "0b7a407a-c56f-46eb-bec0-b66542205a02" (UID: "0b7a407a-c56f-46eb-bec0-b66542205a02"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.319553 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b7a407a-c56f-46eb-bec0-b66542205a02-kube-api-access-9428c" (OuterVolumeSpecName: "kube-api-access-9428c") pod "0b7a407a-c56f-46eb-bec0-b66542205a02" (UID: "0b7a407a-c56f-46eb-bec0-b66542205a02"). InnerVolumeSpecName "kube-api-access-9428c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.415226 4854 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.415256 4854 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.415266 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9428c\" (UniqueName: \"kubernetes.io/projected/0b7a407a-c56f-46eb-bec0-b66542205a02-kube-api-access-9428c\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.415275 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0b7a407a-c56f-46eb-bec0-b66542205a02-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.415283 4854 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.415295 4854 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0b7a407a-c56f-46eb-bec0-b66542205a02-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.855537 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-7dl26-config-stkvp" event={"ID":"0b7a407a-c56f-46eb-bec0-b66542205a02","Type":"ContainerDied","Data":"7e213c1bc1752d720bb4556778027fd52183597b2bfe35be9ea4218c9c827564"} Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.855869 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e213c1bc1752d720bb4556778027fd52183597b2bfe35be9ea4218c9c827564" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.855558 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-7dl26-config-stkvp" Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.860849 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"a0f0ac90cb449a58759c024f01cfca18a2f10cee80badcb389a7b67e26ba2984"} Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.860916 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"24bad8349f812b3d23c47bb20cb20694fca5dd78b9b1da05dfc1a14b3d727aa9"} Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.860929 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"ba9b31194db81e99f68dc7549cfc357d779ca0a289b0437c0bc75605b92a8dbc"} Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.860940 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"4ddc0e4bfe81288bd8949fa0ba57e8566f28ba9315aca1329d249082d78a42b9"} Nov 25 09:57:26 crc kubenswrapper[4854]: I1125 09:57:26.860950 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"e55af6149760154ca4cef83edc884f25a24377488bdabdaef67b2d1d76ce62e4"} Nov 25 09:57:27 crc kubenswrapper[4854]: I1125 09:57:27.343652 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-7dl26-config-stkvp"] Nov 25 09:57:27 crc kubenswrapper[4854]: I1125 09:57:27.356021 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-7dl26-config-stkvp"] Nov 25 09:57:27 crc kubenswrapper[4854]: I1125 09:57:27.905336 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"eb6d8324-0633-4891-9a9c-f782e7cec247","Type":"ContainerStarted","Data":"71813324b6d1d2ec14f6645a6512a57e14e438388ff624e670eeb3b81fa2ad41"} Nov 25 09:57:27 crc kubenswrapper[4854]: I1125 09:57:27.960987 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=41.290073076 podStartE2EDuration="47.960954901s" podCreationTimestamp="2025-11-25 09:56:40 +0000 UTC" firstStartedPulling="2025-11-25 09:57:18.754827403 +0000 UTC m=+1244.607820779" lastFinishedPulling="2025-11-25 09:57:25.425709218 +0000 UTC m=+1251.278702604" observedRunningTime="2025-11-25 09:57:27.947602833 +0000 UTC m=+1253.800596259" watchObservedRunningTime="2025-11-25 09:57:27.960954901 +0000 UTC m=+1253.813948277" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.211097 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-4zd55"] Nov 25 09:57:28 crc kubenswrapper[4854]: E1125 09:57:28.211810 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b7a407a-c56f-46eb-bec0-b66542205a02" containerName="ovn-config" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.211893 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b7a407a-c56f-46eb-bec0-b66542205a02" containerName="ovn-config" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.212188 4854 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0b7a407a-c56f-46eb-bec0-b66542205a02" containerName="ovn-config" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.215003 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.225811 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-4zd55"] Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.266120 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.368622 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-config\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.368727 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.368812 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4srbr\" (UniqueName: \"kubernetes.io/projected/b72fc225-1778-455c-9696-fb610d606860-kube-api-access-4srbr\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.368835 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.368864 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.368901 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.472033 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4srbr\" (UniqueName: \"kubernetes.io/projected/b72fc225-1778-455c-9696-fb610d606860-kube-api-access-4srbr\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.472111 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.472164 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.472239 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.472349 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-config\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.472416 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.473308 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.473363 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.473901 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-config\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.474159 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.474841 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.491929 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4srbr\" (UniqueName: \"kubernetes.io/projected/b72fc225-1778-455c-9696-fb610d606860-kube-api-access-4srbr\") pod \"dnsmasq-dns-5c79d794d7-4zd55\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:28 crc kubenswrapper[4854]: I1125 09:57:28.583076 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:29 crc kubenswrapper[4854]: I1125 09:57:29.027917 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b7a407a-c56f-46eb-bec0-b66542205a02" path="/var/lib/kubelet/pods/0b7a407a-c56f-46eb-bec0-b66542205a02/volumes" Nov 25 09:57:29 crc kubenswrapper[4854]: I1125 09:57:29.045658 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-4zd55"] Nov 25 09:57:29 crc kubenswrapper[4854]: W1125 09:57:29.047209 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb72fc225_1778_455c_9696_fb610d606860.slice/crio-ed91b1f1438f05136483af8f0e150986cbf45603df1fb80f930778e287de53bd WatchSource:0}: Error finding container ed91b1f1438f05136483af8f0e150986cbf45603df1fb80f930778e287de53bd: Status 404 returned error can't find the container with id ed91b1f1438f05136483af8f0e150986cbf45603df1fb80f930778e287de53bd Nov 25 09:57:29 crc kubenswrapper[4854]: I1125 09:57:29.926114 4854 generic.go:334] "Generic (PLEG): container finished" podID="b72fc225-1778-455c-9696-fb610d606860" containerID="279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca" exitCode=0 Nov 25 09:57:29 crc kubenswrapper[4854]: I1125 09:57:29.926221 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" event={"ID":"b72fc225-1778-455c-9696-fb610d606860","Type":"ContainerDied","Data":"279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca"} Nov 25 09:57:29 crc kubenswrapper[4854]: I1125 09:57:29.926577 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" event={"ID":"b72fc225-1778-455c-9696-fb610d606860","Type":"ContainerStarted","Data":"ed91b1f1438f05136483af8f0e150986cbf45603df1fb80f930778e287de53bd"} Nov 25 09:57:30 crc kubenswrapper[4854]: I1125 09:57:30.940231 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" event={"ID":"b72fc225-1778-455c-9696-fb610d606860","Type":"ContainerStarted","Data":"baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37"} Nov 25 09:57:30 crc kubenswrapper[4854]: I1125 09:57:30.940822 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:30 crc kubenswrapper[4854]: I1125 09:57:30.942331 4854 generic.go:334] "Generic (PLEG): container finished" podID="bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" containerID="86aee007c08258f16d7873af7f2909e890cd1f2fa8bc7a60f57eea84c1b7ff46" exitCode=0 Nov 25 09:57:30 crc kubenswrapper[4854]: I1125 09:57:30.942379 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7wk5r" 
event={"ID":"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e","Type":"ContainerDied","Data":"86aee007c08258f16d7873af7f2909e890cd1f2fa8bc7a60f57eea84c1b7ff46"} Nov 25 09:57:30 crc kubenswrapper[4854]: I1125 09:57:30.980006 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" podStartSLOduration=2.979982421 podStartE2EDuration="2.979982421s" podCreationTimestamp="2025-11-25 09:57:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:30.975521169 +0000 UTC m=+1256.828514585" watchObservedRunningTime="2025-11-25 09:57:30.979982421 +0000 UTC m=+1256.832975797" Nov 25 09:57:31 crc kubenswrapper[4854]: I1125 09:57:31.915647 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:31 crc kubenswrapper[4854]: I1125 09:57:31.923456 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:31 crc kubenswrapper[4854]: I1125 09:57:31.979608 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.643807 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-7wk5r" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.788379 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-config-data\") pod \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.788423 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jl6rq\" (UniqueName: \"kubernetes.io/projected/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-kube-api-access-jl6rq\") pod \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.788465 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-db-sync-config-data\") pod \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.788568 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-combined-ca-bundle\") pod \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\" (UID: \"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e\") " Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.795225 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-kube-api-access-jl6rq" (OuterVolumeSpecName: "kube-api-access-jl6rq") pod "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" (UID: "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e"). InnerVolumeSpecName "kube-api-access-jl6rq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.796515 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" (UID: "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.826529 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" (UID: "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.857089 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-config-data" (OuterVolumeSpecName: "config-data") pod "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" (UID: "bd7395cb-72b7-4b86-860c-9ba1b3ccd34e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.891218 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.891255 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jl6rq\" (UniqueName: \"kubernetes.io/projected/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-kube-api-access-jl6rq\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.891270 4854 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.891280 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.991522 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7wk5r" event={"ID":"bd7395cb-72b7-4b86-860c-9ba1b3ccd34e","Type":"ContainerDied","Data":"889b67a15505596efd60e54f09b441ed669b5e3caaff256c309fa9aa3e7d2423"} Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.991574 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-7wk5r" Nov 25 09:57:32 crc kubenswrapper[4854]: I1125 09:57:32.991585 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="889b67a15505596efd60e54f09b441ed669b5e3caaff256c309fa9aa3e7d2423" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.414840 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-4zd55"] Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.415263 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" podUID="b72fc225-1778-455c-9696-fb610d606860" containerName="dnsmasq-dns" containerID="cri-o://baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37" gracePeriod=10 Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.444471 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-5fq4s"] Nov 25 09:57:33 crc kubenswrapper[4854]: E1125 09:57:33.445016 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" containerName="glance-db-sync" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.445036 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" containerName="glance-db-sync" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.445280 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" containerName="glance-db-sync" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.447524 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.461445 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-5fq4s"] Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.604486 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.604558 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.604814 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-config\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.604919 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc 
kubenswrapper[4854]: I1125 09:57:33.604981 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxplr\" (UniqueName: \"kubernetes.io/projected/51aab4f1-d50b-47ba-b45d-e820d83ba125-kube-api-access-dxplr\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.605072 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.706719 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-config\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.706782 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.706811 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxplr\" (UniqueName: \"kubernetes.io/projected/51aab4f1-d50b-47ba-b45d-e820d83ba125-kube-api-access-dxplr\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.706848 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.706911 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.706947 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.708169 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 
09:57:33.710603 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.711311 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.711814 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-config\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.712217 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.740592 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxplr\" (UniqueName: \"kubernetes.io/projected/51aab4f1-d50b-47ba-b45d-e820d83ba125-kube-api-access-dxplr\") pod \"dnsmasq-dns-5f59b8f679-5fq4s\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.774983 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:33 crc kubenswrapper[4854]: I1125 09:57:33.969849 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.015392 4854 generic.go:334] "Generic (PLEG): container finished" podID="b72fc225-1778-455c-9696-fb610d606860" containerID="baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37" exitCode=0 Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.015420 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" event={"ID":"b72fc225-1778-455c-9696-fb610d606860","Type":"ContainerDied","Data":"baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37"} Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.015440 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" event={"ID":"b72fc225-1778-455c-9696-fb610d606860","Type":"ContainerDied","Data":"ed91b1f1438f05136483af8f0e150986cbf45603df1fb80f930778e287de53bd"} Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.015457 4854 scope.go:117] "RemoveContainer" containerID="baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.015582 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-4zd55" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.046964 4854 scope.go:117] "RemoveContainer" containerID="279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.080920 4854 scope.go:117] "RemoveContainer" containerID="baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37" Nov 25 09:57:34 crc kubenswrapper[4854]: E1125 09:57:34.082979 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37\": container with ID starting with baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37 not found: ID does not exist" containerID="baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.083021 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37"} err="failed to get container status \"baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37\": rpc error: code = NotFound desc = could not find container \"baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37\": container with ID starting with baad88c0f04e91c5fee5166751f6ceb71c4d76cc271c0a14ed4e2fe67b4d7a37 not found: ID does not exist" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.083049 4854 scope.go:117] "RemoveContainer" containerID="279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca" Nov 25 09:57:34 crc kubenswrapper[4854]: E1125 09:57:34.083589 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca\": container with ID starting with 279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca not found: ID does not exist" containerID="279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.083636 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca"} err="failed to get container status \"279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca\": rpc error: code = NotFound desc = could not find container \"279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca\": container with ID starting with 279f9ea18e91e1e62edfc6ae91746c0cebf0b2c20fde5eca3b52626ed83ad1ca not found: ID does not exist" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.116847 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-nb\") pod \"b72fc225-1778-455c-9696-fb610d606860\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.116903 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-svc\") pod \"b72fc225-1778-455c-9696-fb610d606860\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.117065 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-swift-storage-0\") pod \"b72fc225-1778-455c-9696-fb610d606860\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.117157 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-config\") pod \"b72fc225-1778-455c-9696-fb610d606860\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.117251 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4srbr\" (UniqueName: \"kubernetes.io/projected/b72fc225-1778-455c-9696-fb610d606860-kube-api-access-4srbr\") pod \"b72fc225-1778-455c-9696-fb610d606860\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.117320 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-sb\") pod \"b72fc225-1778-455c-9696-fb610d606860\" (UID: \"b72fc225-1778-455c-9696-fb610d606860\") " Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.124200 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b72fc225-1778-455c-9696-fb610d606860-kube-api-access-4srbr" (OuterVolumeSpecName: "kube-api-access-4srbr") pod "b72fc225-1778-455c-9696-fb610d606860" (UID: "b72fc225-1778-455c-9696-fb610d606860"). InnerVolumeSpecName "kube-api-access-4srbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.173390 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b72fc225-1778-455c-9696-fb610d606860" (UID: "b72fc225-1778-455c-9696-fb610d606860"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.189626 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b72fc225-1778-455c-9696-fb610d606860" (UID: "b72fc225-1778-455c-9696-fb610d606860"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.210291 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-config" (OuterVolumeSpecName: "config") pod "b72fc225-1778-455c-9696-fb610d606860" (UID: "b72fc225-1778-455c-9696-fb610d606860"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.215358 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b72fc225-1778-455c-9696-fb610d606860" (UID: "b72fc225-1778-455c-9696-fb610d606860"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.229455 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b72fc225-1778-455c-9696-fb610d606860" (UID: "b72fc225-1778-455c-9696-fb610d606860"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.233515 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4srbr\" (UniqueName: \"kubernetes.io/projected/b72fc225-1778-455c-9696-fb610d606860-kube-api-access-4srbr\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.233555 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.233564 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.233576 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.233586 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.233594 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b72fc225-1778-455c-9696-fb610d606860-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.344193 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-5fq4s"] Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.360575 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-4zd55"] Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.368933 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-4zd55"] Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.851003 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Nov 25 09:57:34 crc kubenswrapper[4854]: I1125 09:57:34.866886 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.027510 4854 generic.go:334] "Generic (PLEG): container finished" podID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerID="a9e7cd7e080eeec6a6f18285c55c32226cf2e4b168a5d18ed7b223a66a158f83" exitCode=0 Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.034207 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b72fc225-1778-455c-9696-fb610d606860" path="/var/lib/kubelet/pods/b72fc225-1778-455c-9696-fb610d606860/volumes" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.035219 4854 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" event={"ID":"51aab4f1-d50b-47ba-b45d-e820d83ba125","Type":"ContainerDied","Data":"a9e7cd7e080eeec6a6f18285c55c32226cf2e4b168a5d18ed7b223a66a158f83"} Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.035333 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" event={"ID":"51aab4f1-d50b-47ba-b45d-e820d83ba125","Type":"ContainerStarted","Data":"768e901204c46b46dee173f6e2ac6661786f452acff98cd6e4b591f87c155e6e"} Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.208467 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-m2l2v"] Nov 25 09:57:35 crc kubenswrapper[4854]: E1125 09:57:35.210719 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b72fc225-1778-455c-9696-fb610d606860" containerName="init" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.210740 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b72fc225-1778-455c-9696-fb610d606860" containerName="init" Nov 25 09:57:35 crc kubenswrapper[4854]: E1125 09:57:35.210759 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b72fc225-1778-455c-9696-fb610d606860" containerName="dnsmasq-dns" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.210765 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b72fc225-1778-455c-9696-fb610d606860" containerName="dnsmasq-dns" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.211582 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b72fc225-1778-455c-9696-fb610d606860" containerName="dnsmasq-dns" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.212893 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.229426 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.272956 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-m2l2v"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.294231 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.294998 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="thanos-sidecar" containerID="cri-o://80dac60a0c43639e6b5f2caa74912607cd28500ca7d30e1213e8689c204c0396" gracePeriod=600 Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.294996 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="prometheus" containerID="cri-o://e14583c42ce5d5a2261a9b592f1cdc639741839ca7d9929733691ee983791aa3" gracePeriod=600 Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.295283 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="config-reloader" containerID="cri-o://9622afa4b2cbb87f9b6fa5ca3f47e7bc5b92acc2a407dc54fa80c47312a3e647" gracePeriod=600 Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.373927 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dd430211-c49c-40f1-a776-de76234249eb-operator-scripts\") pod \"cinder-db-create-m2l2v\" (UID: \"dd430211-c49c-40f1-a776-de76234249eb\") " pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.373996 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkjjm\" (UniqueName: \"kubernetes.io/projected/dd430211-c49c-40f1-a776-de76234249eb-kube-api-access-zkjjm\") pod \"cinder-db-create-m2l2v\" (UID: \"dd430211-c49c-40f1-a776-de76234249eb\") " pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.410663 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-hmkhw"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.412204 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.428402 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d94f-account-create-md9dh"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.429655 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.439129 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.457979 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-hmkhw"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.475484 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dd430211-c49c-40f1-a776-de76234249eb-operator-scripts\") pod \"cinder-db-create-m2l2v\" (UID: \"dd430211-c49c-40f1-a776-de76234249eb\") " pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.475536 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkjjm\" (UniqueName: \"kubernetes.io/projected/dd430211-c49c-40f1-a776-de76234249eb-kube-api-access-zkjjm\") pod \"cinder-db-create-m2l2v\" (UID: \"dd430211-c49c-40f1-a776-de76234249eb\") " pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.475578 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48cb153a-315a-4e7b-adea-51a576cd48a6-operator-scripts\") pod \"barbican-db-create-hmkhw\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.475658 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc4q4\" (UniqueName: \"kubernetes.io/projected/48cb153a-315a-4e7b-adea-51a576cd48a6-kube-api-access-xc4q4\") pod \"barbican-db-create-hmkhw\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.476628 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dd430211-c49c-40f1-a776-de76234249eb-operator-scripts\") pod \"cinder-db-create-m2l2v\" (UID: 
\"dd430211-c49c-40f1-a776-de76234249eb\") " pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.489272 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d94f-account-create-md9dh"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.503939 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkjjm\" (UniqueName: \"kubernetes.io/projected/dd430211-c49c-40f1-a776-de76234249eb-kube-api-access-zkjjm\") pod \"cinder-db-create-m2l2v\" (UID: \"dd430211-c49c-40f1-a776-de76234249eb\") " pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.545638 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-fbd2-account-create-wh7nh"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.547174 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.579556 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-fbd2-account-create-wh7nh"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.587993 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48cb153a-315a-4e7b-adea-51a576cd48a6-operator-scripts\") pod \"barbican-db-create-hmkhw\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.588183 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc4q4\" (UniqueName: \"kubernetes.io/projected/48cb153a-315a-4e7b-adea-51a576cd48a6-kube-api-access-xc4q4\") pod \"barbican-db-create-hmkhw\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.589143 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48cb153a-315a-4e7b-adea-51a576cd48a6-operator-scripts\") pod \"barbican-db-create-hmkhw\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.591274 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.601768 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-operator-scripts\") pod \"barbican-d94f-account-create-md9dh\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.601842 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9db7\" (UniqueName: \"kubernetes.io/projected/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-kube-api-access-v9db7\") pod \"barbican-d94f-account-create-md9dh\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.602667 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.627108 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc4q4\" (UniqueName: \"kubernetes.io/projected/48cb153a-315a-4e7b-adea-51a576cd48a6-kube-api-access-xc4q4\") pod \"barbican-db-create-hmkhw\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.705530 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj699\" (UniqueName: \"kubernetes.io/projected/c7341835-d9f5-46b4-b382-364fab8df0ac-kube-api-access-bj699\") pod \"heat-fbd2-account-create-wh7nh\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.705897 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-operator-scripts\") pod \"barbican-d94f-account-create-md9dh\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.705939 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9db7\" (UniqueName: \"kubernetes.io/projected/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-kube-api-access-v9db7\") pod \"barbican-d94f-account-create-md9dh\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.705965 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7341835-d9f5-46b4-b382-364fab8df0ac-operator-scripts\") pod \"heat-fbd2-account-create-wh7nh\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.707355 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-operator-scripts\") pod \"barbican-d94f-account-create-md9dh\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.736679 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9db7\" (UniqueName: \"kubernetes.io/projected/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-kube-api-access-v9db7\") pod \"barbican-d94f-account-create-md9dh\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.765119 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.765814 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-7v9gw"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.767405 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.783952 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-32fe-account-create-b4mz2"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.789577 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.808434 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj699\" (UniqueName: \"kubernetes.io/projected/c7341835-d9f5-46b4-b382-364fab8df0ac-kube-api-access-bj699\") pod \"heat-fbd2-account-create-wh7nh\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.808495 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7341835-d9f5-46b4-b382-364fab8df0ac-operator-scripts\") pod \"heat-fbd2-account-create-wh7nh\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.809302 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-32fe-account-create-b4mz2"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.809437 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.809772 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7341835-d9f5-46b4-b382-364fab8df0ac-operator-scripts\") pod \"heat-fbd2-account-create-wh7nh\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.812834 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.813021 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-7v9gw"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.839300 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-4xfns"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.840721 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.842996 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj699\" (UniqueName: \"kubernetes.io/projected/c7341835-d9f5-46b4-b382-364fab8df0ac-kube-api-access-bj699\") pod \"heat-fbd2-account-create-wh7nh\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.845829 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.845829 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.851096 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.851344 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wks95" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.851573 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-4xfns"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.871598 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-f4l8h"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.875978 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.898586 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-f4l8h"] Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.910112 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf266cb3-661b-4fbd-8162-8da1268957be-operator-scripts\") pod \"heat-db-create-7v9gw\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.910157 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pkgk\" (UniqueName: \"kubernetes.io/projected/a253f619-24d4-453e-815f-c5301c77799c-kube-api-access-6pkgk\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.910206 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v42kq\" (UniqueName: \"kubernetes.io/projected/bf266cb3-661b-4fbd-8162-8da1268957be-kube-api-access-v42kq\") pod \"heat-db-create-7v9gw\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.910224 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-operator-scripts\") pod \"cinder-32fe-account-create-b4mz2\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.910254 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-config-data\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.910277 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-combined-ca-bundle\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:35 crc kubenswrapper[4854]: I1125 09:57:35.910373 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmcqv\" (UniqueName: \"kubernetes.io/projected/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-kube-api-access-pmcqv\") pod \"cinder-32fe-account-create-b4mz2\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.004968 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.014689 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-config-data\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.014750 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-combined-ca-bundle\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.014943 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgfkm\" (UniqueName: \"kubernetes.io/projected/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-kube-api-access-cgfkm\") pod \"neutron-db-create-f4l8h\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.015008 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmcqv\" (UniqueName: \"kubernetes.io/projected/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-kube-api-access-pmcqv\") pod \"cinder-32fe-account-create-b4mz2\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.015081 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-c575-account-create-kpd7g"] Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.019337 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.023055 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.028734 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-config-data\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.015083 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-operator-scripts\") pod \"neutron-db-create-f4l8h\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.044237 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf266cb3-661b-4fbd-8162-8da1268957be-operator-scripts\") pod \"heat-db-create-7v9gw\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.044302 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pkgk\" (UniqueName: \"kubernetes.io/projected/a253f619-24d4-453e-815f-c5301c77799c-kube-api-access-6pkgk\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.044367 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmcqv\" (UniqueName: \"kubernetes.io/projected/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-kube-api-access-pmcqv\") pod \"cinder-32fe-account-create-b4mz2\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.044539 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v42kq\" (UniqueName: \"kubernetes.io/projected/bf266cb3-661b-4fbd-8162-8da1268957be-kube-api-access-v42kq\") pod \"heat-db-create-7v9gw\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.044572 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-operator-scripts\") pod \"cinder-32fe-account-create-b4mz2\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.045101 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf266cb3-661b-4fbd-8162-8da1268957be-operator-scripts\") pod \"heat-db-create-7v9gw\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.045544 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-operator-scripts\") pod \"cinder-32fe-account-create-b4mz2\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.046054 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-combined-ca-bundle\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.046943 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c575-account-create-kpd7g"] Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.053027 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.065903 4854 generic.go:334] "Generic (PLEG): container finished" podID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerID="80dac60a0c43639e6b5f2caa74912607cd28500ca7d30e1213e8689c204c0396" exitCode=0 Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.065955 4854 generic.go:334] "Generic (PLEG): container finished" podID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerID="9622afa4b2cbb87f9b6fa5ca3f47e7bc5b92acc2a407dc54fa80c47312a3e647" exitCode=0 Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.065968 4854 generic.go:334] "Generic (PLEG): container finished" podID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerID="e14583c42ce5d5a2261a9b592f1cdc639741839ca7d9929733691ee983791aa3" exitCode=0 Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.066024 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerDied","Data":"80dac60a0c43639e6b5f2caa74912607cd28500ca7d30e1213e8689c204c0396"} Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.066063 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerDied","Data":"9622afa4b2cbb87f9b6fa5ca3f47e7bc5b92acc2a407dc54fa80c47312a3e647"} Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.066078 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerDied","Data":"e14583c42ce5d5a2261a9b592f1cdc639741839ca7d9929733691ee983791aa3"} Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.083003 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" event={"ID":"51aab4f1-d50b-47ba-b45d-e820d83ba125","Type":"ContainerStarted","Data":"660d363b72322a4efe63f9d699e3155dc3aac15c0223fd287c9539a455968e41"} Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.085299 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v42kq\" (UniqueName: \"kubernetes.io/projected/bf266cb3-661b-4fbd-8162-8da1268957be-kube-api-access-v42kq\") pod \"heat-db-create-7v9gw\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.085370 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:36 crc 
kubenswrapper[4854]: I1125 09:57:36.086451 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pkgk\" (UniqueName: \"kubernetes.io/projected/a253f619-24d4-453e-815f-c5301c77799c-kube-api-access-6pkgk\") pod \"keystone-db-sync-4xfns\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.115315 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" podStartSLOduration=3.115291871 podStartE2EDuration="3.115291871s" podCreationTimestamp="2025-11-25 09:57:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:36.102043787 +0000 UTC m=+1261.955037173" watchObservedRunningTime="2025-11-25 09:57:36.115291871 +0000 UTC m=+1261.968285247" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.146833 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgfkm\" (UniqueName: \"kubernetes.io/projected/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-kube-api-access-cgfkm\") pod \"neutron-db-create-f4l8h\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.146919 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs776\" (UniqueName: \"kubernetes.io/projected/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-kube-api-access-bs776\") pod \"neutron-c575-account-create-kpd7g\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.146996 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-operator-scripts\") pod \"neutron-db-create-f4l8h\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.147056 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-operator-scripts\") pod \"neutron-c575-account-create-kpd7g\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.148604 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-operator-scripts\") pod \"neutron-db-create-f4l8h\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.186089 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgfkm\" (UniqueName: \"kubernetes.io/projected/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-kube-api-access-cgfkm\") pod \"neutron-db-create-f4l8h\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.259223 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs776\" (UniqueName: 
\"kubernetes.io/projected/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-kube-api-access-bs776\") pod \"neutron-c575-account-create-kpd7g\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.259377 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-operator-scripts\") pod \"neutron-c575-account-create-kpd7g\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.262167 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-operator-scripts\") pod \"neutron-c575-account-create-kpd7g\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.309307 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs776\" (UniqueName: \"kubernetes.io/projected/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-kube-api-access-bs776\") pod \"neutron-c575-account-create-kpd7g\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.329198 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.364054 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.364506 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-m2l2v"] Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.385835 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.390482 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:36 crc kubenswrapper[4854]: W1125 09:57:36.412117 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd430211_c49c_40f1_a776_de76234249eb.slice/crio-bf6dc3a7c519d1df44e672b7c4141504e6dc5c1dfc8f7c9750f18d3a38313109 WatchSource:0}: Error finding container bf6dc3a7c519d1df44e672b7c4141504e6dc5c1dfc8f7c9750f18d3a38313109: Status 404 returned error can't find the container with id bf6dc3a7c519d1df44e672b7c4141504e6dc5c1dfc8f7c9750f18d3a38313109 Nov 25 09:57:36 crc kubenswrapper[4854]: I1125 09:57:36.668036 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-hmkhw"] Nov 25 09:57:36 crc kubenswrapper[4854]: W1125 09:57:36.755347 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod48cb153a_315a_4e7b_adea_51a576cd48a6.slice/crio-1c016ca26e4a2dd8ec5daf24fb7ef38d9b2f571101d59ae35cf0d0d5b06b93a8 WatchSource:0}: Error finding container 1c016ca26e4a2dd8ec5daf24fb7ef38d9b2f571101d59ae35cf0d0d5b06b93a8: Status 404 returned error can't find the container with id 1c016ca26e4a2dd8ec5daf24fb7ef38d9b2f571101d59ae35cf0d0d5b06b93a8 Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.010438 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d94f-account-create-md9dh"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.022534 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.123012 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-fbd2-account-create-wh7nh"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.134011 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fbd2-account-create-wh7nh" event={"ID":"c7341835-d9f5-46b4-b382-364fab8df0ac","Type":"ContainerStarted","Data":"f8c1f16098f50c668ecb6a02a96b63429cb9f0ac7c9ccce87b6c37647b3e255b"} Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.151667 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.151859 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"93c535b4-23bb-4c71-8ddc-1304ca205e55","Type":"ContainerDied","Data":"fd04f3d1c9e26c07b6ea0f597a2b329dcacad50e683e5ce007eedbc90d7979fe"} Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.151903 4854 scope.go:117] "RemoveContainer" containerID="80dac60a0c43639e6b5f2caa74912607cd28500ca7d30e1213e8689c204c0396" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.154478 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-m2l2v" event={"ID":"dd430211-c49c-40f1-a776-de76234249eb","Type":"ContainerStarted","Data":"bf6dc3a7c519d1df44e672b7c4141504e6dc5c1dfc8f7c9750f18d3a38313109"} Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.162630 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d94f-account-create-md9dh" event={"ID":"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9","Type":"ContainerStarted","Data":"c71f3118da2b95630a6236eec80eafbd35c14189595e77d94cc914b68ddf2c65"} Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.167111 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-hmkhw" event={"ID":"48cb153a-315a-4e7b-adea-51a576cd48a6","Type":"ContainerStarted","Data":"1c016ca26e4a2dd8ec5daf24fb7ef38d9b2f571101d59ae35cf0d0d5b06b93a8"} Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.177137 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-32fe-account-create-b4mz2"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.183350 4854 scope.go:117] "RemoveContainer" containerID="9622afa4b2cbb87f9b6fa5ca3f47e7bc5b92acc2a407dc54fa80c47312a3e647" Nov 25 09:57:37 crc kubenswrapper[4854]: W1125 09:57:37.210119 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89ab6a6b_99ee_4388_92a6_d1e0bb469abc.slice/crio-215893a71583397df67efc741ae7957c26afbefbc1cf9ce61399f68d89e2e7e2 WatchSource:0}: Error finding container 215893a71583397df67efc741ae7957c26afbefbc1cf9ce61399f68d89e2e7e2: Status 404 returned error can't find the container with id 215893a71583397df67efc741ae7957c26afbefbc1cf9ce61399f68d89e2e7e2 Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.227404 4854 scope.go:117] "RemoveContainer" containerID="e14583c42ce5d5a2261a9b592f1cdc639741839ca7d9929733691ee983791aa3" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.234517 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-thanos-prometheus-http-client-file\") pod \"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.234885 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-web-config\") pod \"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.234942 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-config\") pod 
\"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.234976 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-tls-assets\") pod \"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.235033 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/93c535b4-23bb-4c71-8ddc-1304ca205e55-prometheus-metric-storage-rulefiles-0\") pod \"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.235076 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/93c535b4-23bb-4c71-8ddc-1304ca205e55-config-out\") pod \"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.235250 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.235358 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flk2x\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-kube-api-access-flk2x\") pod \"93c535b4-23bb-4c71-8ddc-1304ca205e55\" (UID: \"93c535b4-23bb-4c71-8ddc-1304ca205e55\") " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.237906 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93c535b4-23bb-4c71-8ddc-1304ca205e55-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.256095 4854 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/93c535b4-23bb-4c71-8ddc-1304ca205e55-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.259634 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93c535b4-23bb-4c71-8ddc-1304ca205e55-config-out" (OuterVolumeSpecName: "config-out") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "config-out". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.261313 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.286592 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-config" (OuterVolumeSpecName: "config") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.286684 4854 scope.go:117] "RemoveContainer" containerID="a6703b655a254b75add489c6b2c87a50e14d61760651e6996b6ba25d68203c5a" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.300423 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.322240 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-kube-api-access-flk2x" (OuterVolumeSpecName: "kube-api-access-flk2x") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "kube-api-access-flk2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.334801 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.370715 4854 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.370743 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.370752 4854 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/93c535b4-23bb-4c71-8ddc-1304ca205e55-config-out\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.370776 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") on node \"crc\" " Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.370790 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flk2x\" (UniqueName: \"kubernetes.io/projected/93c535b4-23bb-4c71-8ddc-1304ca205e55-kube-api-access-flk2x\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.370800 4854 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.398292 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-7v9gw"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.424362 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-4xfns"] Nov 25 09:57:37 crc kubenswrapper[4854]: W1125 09:57:37.428853 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf266cb3_661b_4fbd_8162_8da1268957be.slice/crio-062cd186bd2d34967d4e3028d727f97170e2af9f869b2881ebe0980dae753377 WatchSource:0}: Error finding container 062cd186bd2d34967d4e3028d727f97170e2af9f869b2881ebe0980dae753377: Status 404 returned error can't find the container with id 062cd186bd2d34967d4e3028d727f97170e2af9f869b2881ebe0980dae753377 Nov 25 09:57:37 crc kubenswrapper[4854]: W1125 09:57:37.431863 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda253f619_24d4_453e_815f_c5301c77799c.slice/crio-4d1e16e0e4b1fdb164657b3fc6e21ece8811114c8bbcdfb858ee4855ac6e7813 WatchSource:0}: Error finding container 4d1e16e0e4b1fdb164657b3fc6e21ece8811114c8bbcdfb858ee4855ac6e7813: Status 404 returned error can't find the container with id 4d1e16e0e4b1fdb164657b3fc6e21ece8811114c8bbcdfb858ee4855ac6e7813 Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.433820 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-web-config" (OuterVolumeSpecName: "web-config") pod "93c535b4-23bb-4c71-8ddc-1304ca205e55" (UID: "93c535b4-23bb-4c71-8ddc-1304ca205e55"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.450459 4854 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.450618 4854 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a") on node "crc" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.476498 4854 reconciler_common.go:293] "Volume detached for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.476526 4854 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/93c535b4-23bb-4c71-8ddc-1304ca205e55-web-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.529098 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.566761 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.613585 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:57:37 crc kubenswrapper[4854]: E1125 09:57:37.617327 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="config-reloader" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.617364 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="config-reloader" Nov 25 09:57:37 crc kubenswrapper[4854]: E1125 09:57:37.617390 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="init-config-reloader" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.617398 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="init-config-reloader" Nov 25 09:57:37 crc kubenswrapper[4854]: E1125 09:57:37.617411 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="thanos-sidecar" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.617419 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="thanos-sidecar" Nov 25 09:57:37 crc kubenswrapper[4854]: E1125 09:57:37.617438 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="prometheus" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.617446 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="prometheus" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.617802 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="prometheus" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.617828 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" 
containerName="thanos-sidecar" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.617843 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="config-reloader" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.620877 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.631100 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.631332 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.631523 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.631644 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-t6crr" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.632050 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.633276 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.639853 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.653038 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.690318 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c575-account-create-kpd7g"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.733297 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-f4l8h"] Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790069 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790147 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790227 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/558820bb-fc66-444a-96d3-107dbc60fb3f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790271 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790302 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-config\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790361 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790396 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/558820bb-fc66-444a-96d3-107dbc60fb3f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790450 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790524 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790553 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/558820bb-fc66-444a-96d3-107dbc60fb3f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.790571 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dnlm\" (UniqueName: \"kubernetes.io/projected/558820bb-fc66-444a-96d3-107dbc60fb3f-kube-api-access-9dnlm\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898726 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898793 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898845 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/558820bb-fc66-444a-96d3-107dbc60fb3f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898874 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898897 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-config\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898920 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898948 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/558820bb-fc66-444a-96d3-107dbc60fb3f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.898984 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.899035 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") 
pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.899057 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/558820bb-fc66-444a-96d3-107dbc60fb3f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.899072 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dnlm\" (UniqueName: \"kubernetes.io/projected/558820bb-fc66-444a-96d3-107dbc60fb3f-kube-api-access-9dnlm\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.901217 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/558820bb-fc66-444a-96d3-107dbc60fb3f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.908347 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.916370 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-config\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.916607 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.916689 4854 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.916742 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8135b1691aea57bacf9aa6f6ce849194fda4d61ab30e82c5a91a76373bce7e14/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.916772 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/558820bb-fc66-444a-96d3-107dbc60fb3f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.919958 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/558820bb-fc66-444a-96d3-107dbc60fb3f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.920362 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.939915 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.956647 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/558820bb-fc66-444a-96d3-107dbc60fb3f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:37 crc kubenswrapper[4854]: I1125 09:57:37.981950 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dnlm\" (UniqueName: \"kubernetes.io/projected/558820bb-fc66-444a-96d3-107dbc60fb3f-kube-api-access-9dnlm\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.158130 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92f6511f-ed36-41a5-a620-d81da35cdd8a\") pod \"prometheus-metric-storage-0\" (UID: \"558820bb-fc66-444a-96d3-107dbc60fb3f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.178442 4854 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/cinder-db-create-m2l2v" event={"ID":"dd430211-c49c-40f1-a776-de76234249eb","Type":"ContainerStarted","Data":"7d0ddd9a2f9ff19c05e9f3a35af705f152f76840629a045c10895eef1fd68684"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.180065 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-f4l8h" event={"ID":"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360","Type":"ContainerStarted","Data":"c4802c24326ffb7c0645d440210ee4de80835584e073e0024180c158090676c7"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.181334 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-32fe-account-create-b4mz2" event={"ID":"89ab6a6b-99ee-4388-92a6-d1e0bb469abc","Type":"ContainerStarted","Data":"215893a71583397df67efc741ae7957c26afbefbc1cf9ce61399f68d89e2e7e2"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.183889 4854 generic.go:334] "Generic (PLEG): container finished" podID="48cb153a-315a-4e7b-adea-51a576cd48a6" containerID="4a041cdb915f0473f0e051a9de5d7ed6834c52efbf7e77de3a0c70381628bbb0" exitCode=0 Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.184064 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-hmkhw" event={"ID":"48cb153a-315a-4e7b-adea-51a576cd48a6","Type":"ContainerDied","Data":"4a041cdb915f0473f0e051a9de5d7ed6834c52efbf7e77de3a0c70381628bbb0"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.185553 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-7v9gw" event={"ID":"bf266cb3-661b-4fbd-8162-8da1268957be","Type":"ContainerStarted","Data":"062cd186bd2d34967d4e3028d727f97170e2af9f869b2881ebe0980dae753377"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.187193 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4xfns" event={"ID":"a253f619-24d4-453e-815f-c5301c77799c","Type":"ContainerStarted","Data":"4d1e16e0e4b1fdb164657b3fc6e21ece8811114c8bbcdfb858ee4855ac6e7813"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.188755 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.193143 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fbd2-account-create-wh7nh" event={"ID":"c7341835-d9f5-46b4-b382-364fab8df0ac","Type":"ContainerStarted","Data":"0491b636aa9ffb5c6cb3b6acc5f0a46ffd190ab374a3b71623af60793f016a8f"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.201904 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c575-account-create-kpd7g" event={"ID":"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc","Type":"ContainerStarted","Data":"ccd9a756779f22df1d1c326552509b22d12d0b09565e145199e8bae9df16540c"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.223822 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d94f-account-create-md9dh" event={"ID":"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9","Type":"ContainerStarted","Data":"8331dbd97c5f158d8d4bfa4859d8a4f0e025df23976af7935ba58a65c8fddb53"} Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.240572 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-m2l2v" podStartSLOduration=3.240550559 podStartE2EDuration="3.240550559s" podCreationTimestamp="2025-11-25 09:57:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:38.212122896 +0000 UTC m=+1264.065116272" watchObservedRunningTime="2025-11-25 09:57:38.240550559 +0000 UTC m=+1264.093543935" Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.260572 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-fbd2-account-create-wh7nh" podStartSLOduration=3.260553399 podStartE2EDuration="3.260553399s" podCreationTimestamp="2025-11-25 09:57:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:38.253109325 +0000 UTC m=+1264.106102711" watchObservedRunningTime="2025-11-25 09:57:38.260553399 +0000 UTC m=+1264.113546775" Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.276613 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-d94f-account-create-md9dh" podStartSLOduration=3.276595141 podStartE2EDuration="3.276595141s" podCreationTimestamp="2025-11-25 09:57:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:38.271209843 +0000 UTC m=+1264.124203229" watchObservedRunningTime="2025-11-25 09:57:38.276595141 +0000 UTC m=+1264.129588517" Nov 25 09:57:38 crc kubenswrapper[4854]: I1125 09:57:38.881189 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.031164 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" path="/var/lib/kubelet/pods/93c535b4-23bb-4c71-8ddc-1304ca205e55/volumes" Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.238069 4854 generic.go:334] "Generic (PLEG): container finished" podID="7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc" containerID="ff773b5022ada182235f3b5c352a4451615969efc51224aa361b23293584e973" exitCode=0 Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.238283 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-c575-account-create-kpd7g" event={"ID":"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc","Type":"ContainerDied","Data":"ff773b5022ada182235f3b5c352a4451615969efc51224aa361b23293584e973"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.241924 4854 generic.go:334] "Generic (PLEG): container finished" podID="c7341835-d9f5-46b4-b382-364fab8df0ac" containerID="0491b636aa9ffb5c6cb3b6acc5f0a46ffd190ab374a3b71623af60793f016a8f" exitCode=0 Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.242031 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fbd2-account-create-wh7nh" event={"ID":"c7341835-d9f5-46b4-b382-364fab8df0ac","Type":"ContainerDied","Data":"0491b636aa9ffb5c6cb3b6acc5f0a46ffd190ab374a3b71623af60793f016a8f"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.243838 4854 generic.go:334] "Generic (PLEG): container finished" podID="dd430211-c49c-40f1-a776-de76234249eb" containerID="7d0ddd9a2f9ff19c05e9f3a35af705f152f76840629a045c10895eef1fd68684" exitCode=0 Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.243896 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-m2l2v" event={"ID":"dd430211-c49c-40f1-a776-de76234249eb","Type":"ContainerDied","Data":"7d0ddd9a2f9ff19c05e9f3a35af705f152f76840629a045c10895eef1fd68684"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.245839 4854 generic.go:334] "Generic (PLEG): container finished" podID="ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360" containerID="6847d5629dfe97b64f42dec56e4ee75497bdc8207fdcd3979b11bbcdce32c140" exitCode=0 Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.245906 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-f4l8h" event={"ID":"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360","Type":"ContainerDied","Data":"6847d5629dfe97b64f42dec56e4ee75497bdc8207fdcd3979b11bbcdce32c140"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.249057 4854 generic.go:334] "Generic (PLEG): container finished" podID="89ab6a6b-99ee-4388-92a6-d1e0bb469abc" containerID="1fc7dc122c45fb487247878641dc22a52eb26553b44ba9c5877212dd062970dc" exitCode=0 Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.249123 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-32fe-account-create-b4mz2" event={"ID":"89ab6a6b-99ee-4388-92a6-d1e0bb469abc","Type":"ContainerDied","Data":"1fc7dc122c45fb487247878641dc22a52eb26553b44ba9c5877212dd062970dc"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.251207 4854 generic.go:334] "Generic (PLEG): container finished" podID="b6caae98-1807-47ad-a1b8-ddc1ff33b2d9" containerID="8331dbd97c5f158d8d4bfa4859d8a4f0e025df23976af7935ba58a65c8fddb53" exitCode=0 Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.251284 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d94f-account-create-md9dh" event={"ID":"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9","Type":"ContainerDied","Data":"8331dbd97c5f158d8d4bfa4859d8a4f0e025df23976af7935ba58a65c8fddb53"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.256599 4854 generic.go:334] "Generic (PLEG): container finished" podID="bf266cb3-661b-4fbd-8162-8da1268957be" containerID="17c1d0e49e0440a3b70852177d5cad6b9752c11c9cd22892488e81922e5f82e9" exitCode=0 Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.256658 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-7v9gw" 
event={"ID":"bf266cb3-661b-4fbd-8162-8da1268957be","Type":"ContainerDied","Data":"17c1d0e49e0440a3b70852177d5cad6b9752c11c9cd22892488e81922e5f82e9"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.263006 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"558820bb-fc66-444a-96d3-107dbc60fb3f","Type":"ContainerStarted","Data":"e7cfe584dc9b1944cec467f2350cd8798f13b3c2b8d964c1efe0f591f63628a8"} Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.869229 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.920888 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="93c535b4-23bb-4c71-8ddc-1304ca205e55" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.139:9090/-/ready\": dial tcp 10.217.0.139:9090: i/o timeout (Client.Timeout exceeded while awaiting headers)" Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.952583 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48cb153a-315a-4e7b-adea-51a576cd48a6-operator-scripts\") pod \"48cb153a-315a-4e7b-adea-51a576cd48a6\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.952714 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc4q4\" (UniqueName: \"kubernetes.io/projected/48cb153a-315a-4e7b-adea-51a576cd48a6-kube-api-access-xc4q4\") pod \"48cb153a-315a-4e7b-adea-51a576cd48a6\" (UID: \"48cb153a-315a-4e7b-adea-51a576cd48a6\") " Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.956371 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48cb153a-315a-4e7b-adea-51a576cd48a6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "48cb153a-315a-4e7b-adea-51a576cd48a6" (UID: "48cb153a-315a-4e7b-adea-51a576cd48a6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:39 crc kubenswrapper[4854]: I1125 09:57:39.964751 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48cb153a-315a-4e7b-adea-51a576cd48a6-kube-api-access-xc4q4" (OuterVolumeSpecName: "kube-api-access-xc4q4") pod "48cb153a-315a-4e7b-adea-51a576cd48a6" (UID: "48cb153a-315a-4e7b-adea-51a576cd48a6"). InnerVolumeSpecName "kube-api-access-xc4q4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:40 crc kubenswrapper[4854]: I1125 09:57:40.056552 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc4q4\" (UniqueName: \"kubernetes.io/projected/48cb153a-315a-4e7b-adea-51a576cd48a6-kube-api-access-xc4q4\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:40 crc kubenswrapper[4854]: I1125 09:57:40.056589 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48cb153a-315a-4e7b-adea-51a576cd48a6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:40 crc kubenswrapper[4854]: I1125 09:57:40.278237 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-hmkhw" event={"ID":"48cb153a-315a-4e7b-adea-51a576cd48a6","Type":"ContainerDied","Data":"1c016ca26e4a2dd8ec5daf24fb7ef38d9b2f571101d59ae35cf0d0d5b06b93a8"} Nov 25 09:57:40 crc kubenswrapper[4854]: I1125 09:57:40.278277 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-hmkhw" Nov 25 09:57:40 crc kubenswrapper[4854]: I1125 09:57:40.278288 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c016ca26e4a2dd8ec5daf24fb7ef38d9b2f571101d59ae35cf0d0d5b06b93a8" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.317722 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"558820bb-fc66-444a-96d3-107dbc60fb3f","Type":"ContainerStarted","Data":"031e0ad54e70165472a3cf59a3cad7f235349b2cde4059cedb137c9df7812b7e"} Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.774227 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.777332 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.786925 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.821033 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.821168 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.827997 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.853976 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-operator-scripts\") pod \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.854010 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgfkm\" (UniqueName: \"kubernetes.io/projected/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-kube-api-access-cgfkm\") pod \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.854086 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9db7\" (UniqueName: \"kubernetes.io/projected/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-kube-api-access-v9db7\") pod \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\" (UID: \"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.854412 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-operator-scripts\") pod \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\" (UID: \"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.855896 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6caae98-1807-47ad-a1b8-ddc1ff33b2d9" (UID: "b6caae98-1807-47ad-a1b8-ddc1ff33b2d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.856395 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.857637 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360" (UID: "ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.871405 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-kube-api-access-cgfkm" (OuterVolumeSpecName: "kube-api-access-cgfkm") pod "ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360" (UID: "ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360"). InnerVolumeSpecName "kube-api-access-cgfkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.871809 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-kube-api-access-v9db7" (OuterVolumeSpecName: "kube-api-access-v9db7") pod "b6caae98-1807-47ad-a1b8-ddc1ff33b2d9" (UID: "b6caae98-1807-47ad-a1b8-ddc1ff33b2d9"). InnerVolumeSpecName "kube-api-access-v9db7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.873332 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.909723 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-qf5fg"] Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.910109 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" podUID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerName="dnsmasq-dns" containerID="cri-o://69aac727da93d49e87569df43f08d80deb5ad646479eb358200a163bc2cd1b6b" gracePeriod=10 Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.957922 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v42kq\" (UniqueName: \"kubernetes.io/projected/bf266cb3-661b-4fbd-8162-8da1268957be-kube-api-access-v42kq\") pod \"bf266cb3-661b-4fbd-8162-8da1268957be\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.957976 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dd430211-c49c-40f1-a776-de76234249eb-operator-scripts\") pod \"dd430211-c49c-40f1-a776-de76234249eb\" (UID: \"dd430211-c49c-40f1-a776-de76234249eb\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.957996 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bj699\" (UniqueName: \"kubernetes.io/projected/c7341835-d9f5-46b4-b382-364fab8df0ac-kube-api-access-bj699\") pod \"c7341835-d9f5-46b4-b382-364fab8df0ac\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.958173 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkjjm\" (UniqueName: \"kubernetes.io/projected/dd430211-c49c-40f1-a776-de76234249eb-kube-api-access-zkjjm\") pod \"dd430211-c49c-40f1-a776-de76234249eb\" (UID: \"dd430211-c49c-40f1-a776-de76234249eb\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.958638 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd430211-c49c-40f1-a776-de76234249eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dd430211-c49c-40f1-a776-de76234249eb" (UID: "dd430211-c49c-40f1-a776-de76234249eb"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.959210 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmcqv\" (UniqueName: \"kubernetes.io/projected/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-kube-api-access-pmcqv\") pod \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.959304 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf266cb3-661b-4fbd-8162-8da1268957be-operator-scripts\") pod \"bf266cb3-661b-4fbd-8162-8da1268957be\" (UID: \"bf266cb3-661b-4fbd-8162-8da1268957be\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.959334 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7341835-d9f5-46b4-b382-364fab8df0ac-operator-scripts\") pod \"c7341835-d9f5-46b4-b382-364fab8df0ac\" (UID: \"c7341835-d9f5-46b4-b382-364fab8df0ac\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.959364 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-operator-scripts\") pod \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\" (UID: \"89ab6a6b-99ee-4388-92a6-d1e0bb469abc\") " Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.959720 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf266cb3-661b-4fbd-8162-8da1268957be-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf266cb3-661b-4fbd-8162-8da1268957be" (UID: "bf266cb3-661b-4fbd-8162-8da1268957be"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.960034 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "89ab6a6b-99ee-4388-92a6-d1e0bb469abc" (UID: "89ab6a6b-99ee-4388-92a6-d1e0bb469abc"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.960612 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dd430211-c49c-40f1-a776-de76234249eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.960636 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.960646 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgfkm\" (UniqueName: \"kubernetes.io/projected/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360-kube-api-access-cgfkm\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.960658 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9db7\" (UniqueName: \"kubernetes.io/projected/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9-kube-api-access-v9db7\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.960667 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf266cb3-661b-4fbd-8162-8da1268957be-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.960676 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.963243 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7341835-d9f5-46b4-b382-364fab8df0ac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c7341835-d9f5-46b4-b382-364fab8df0ac" (UID: "c7341835-d9f5-46b4-b382-364fab8df0ac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.964083 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-kube-api-access-pmcqv" (OuterVolumeSpecName: "kube-api-access-pmcqv") pod "89ab6a6b-99ee-4388-92a6-d1e0bb469abc" (UID: "89ab6a6b-99ee-4388-92a6-d1e0bb469abc"). InnerVolumeSpecName "kube-api-access-pmcqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.966114 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd430211-c49c-40f1-a776-de76234249eb-kube-api-access-zkjjm" (OuterVolumeSpecName: "kube-api-access-zkjjm") pod "dd430211-c49c-40f1-a776-de76234249eb" (UID: "dd430211-c49c-40f1-a776-de76234249eb"). InnerVolumeSpecName "kube-api-access-zkjjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.966148 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf266cb3-661b-4fbd-8162-8da1268957be-kube-api-access-v42kq" (OuterVolumeSpecName: "kube-api-access-v42kq") pod "bf266cb3-661b-4fbd-8162-8da1268957be" (UID: "bf266cb3-661b-4fbd-8162-8da1268957be"). InnerVolumeSpecName "kube-api-access-v42kq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:43 crc kubenswrapper[4854]: I1125 09:57:43.967017 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7341835-d9f5-46b4-b382-364fab8df0ac-kube-api-access-bj699" (OuterVolumeSpecName: "kube-api-access-bj699") pod "c7341835-d9f5-46b4-b382-364fab8df0ac" (UID: "c7341835-d9f5-46b4-b382-364fab8df0ac"). InnerVolumeSpecName "kube-api-access-bj699". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.063292 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7341835-d9f5-46b4-b382-364fab8df0ac-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.063498 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v42kq\" (UniqueName: \"kubernetes.io/projected/bf266cb3-661b-4fbd-8162-8da1268957be-kube-api-access-v42kq\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.063556 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bj699\" (UniqueName: \"kubernetes.io/projected/c7341835-d9f5-46b4-b382-364fab8df0ac-kube-api-access-bj699\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.063771 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkjjm\" (UniqueName: \"kubernetes.io/projected/dd430211-c49c-40f1-a776-de76234249eb-kube-api-access-zkjjm\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.063853 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmcqv\" (UniqueName: \"kubernetes.io/projected/89ab6a6b-99ee-4388-92a6-d1e0bb469abc-kube-api-access-pmcqv\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.069136 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.165567 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs776\" (UniqueName: \"kubernetes.io/projected/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-kube-api-access-bs776\") pod \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.165940 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-operator-scripts\") pod \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\" (UID: \"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc\") " Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.166311 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc" (UID: "7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.166614 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.173848 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-kube-api-access-bs776" (OuterVolumeSpecName: "kube-api-access-bs776") pod "7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc" (UID: "7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc"). InnerVolumeSpecName "kube-api-access-bs776". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.268031 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs776\" (UniqueName: \"kubernetes.io/projected/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc-kube-api-access-bs776\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.331488 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-f4l8h" event={"ID":"ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360","Type":"ContainerDied","Data":"c4802c24326ffb7c0645d440210ee4de80835584e073e0024180c158090676c7"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.331530 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4802c24326ffb7c0645d440210ee4de80835584e073e0024180c158090676c7" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.331496 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f4l8h" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.333290 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-32fe-account-create-b4mz2" event={"ID":"89ab6a6b-99ee-4388-92a6-d1e0bb469abc","Type":"ContainerDied","Data":"215893a71583397df67efc741ae7957c26afbefbc1cf9ce61399f68d89e2e7e2"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.333359 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="215893a71583397df67efc741ae7957c26afbefbc1cf9ce61399f68d89e2e7e2" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.333399 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-32fe-account-create-b4mz2" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.336991 4854 generic.go:334] "Generic (PLEG): container finished" podID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerID="69aac727da93d49e87569df43f08d80deb5ad646479eb358200a163bc2cd1b6b" exitCode=0 Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.337053 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" event={"ID":"8661757b-9fae-4808-9568-3997a0b0c7b6","Type":"ContainerDied","Data":"69aac727da93d49e87569df43f08d80deb5ad646479eb358200a163bc2cd1b6b"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.342645 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-fbd2-account-create-wh7nh" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.342656 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fbd2-account-create-wh7nh" event={"ID":"c7341835-d9f5-46b4-b382-364fab8df0ac","Type":"ContainerDied","Data":"f8c1f16098f50c668ecb6a02a96b63429cb9f0ac7c9ccce87b6c37647b3e255b"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.342767 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8c1f16098f50c668ecb6a02a96b63429cb9f0ac7c9ccce87b6c37647b3e255b" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.346955 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c575-account-create-kpd7g" event={"ID":"7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc","Type":"ContainerDied","Data":"ccd9a756779f22df1d1c326552509b22d12d0b09565e145199e8bae9df16540c"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.346995 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ccd9a756779f22df1d1c326552509b22d12d0b09565e145199e8bae9df16540c" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.347059 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c575-account-create-kpd7g" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.350122 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d94f-account-create-md9dh" event={"ID":"b6caae98-1807-47ad-a1b8-ddc1ff33b2d9","Type":"ContainerDied","Data":"c71f3118da2b95630a6236eec80eafbd35c14189595e77d94cc914b68ddf2c65"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.350161 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c71f3118da2b95630a6236eec80eafbd35c14189595e77d94cc914b68ddf2c65" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.350223 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d94f-account-create-md9dh" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.359359 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-7v9gw" event={"ID":"bf266cb3-661b-4fbd-8162-8da1268957be","Type":"ContainerDied","Data":"062cd186bd2d34967d4e3028d727f97170e2af9f869b2881ebe0980dae753377"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.359400 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="062cd186bd2d34967d4e3028d727f97170e2af9f869b2881ebe0980dae753377" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.359455 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-7v9gw" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.362897 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4xfns" event={"ID":"a253f619-24d4-453e-815f-c5301c77799c","Type":"ContainerStarted","Data":"62e81eaac63d5c569bae9bf10530fcb176395c37359dc28f068b0d5893662ef3"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.370344 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-m2l2v" event={"ID":"dd430211-c49c-40f1-a776-de76234249eb","Type":"ContainerDied","Data":"bf6dc3a7c519d1df44e672b7c4141504e6dc5c1dfc8f7c9750f18d3a38313109"} Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.370386 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf6dc3a7c519d1df44e672b7c4141504e6dc5c1dfc8f7c9750f18d3a38313109" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.370366 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-m2l2v" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.378548 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.401519 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-4xfns" podStartSLOduration=3.236086406 podStartE2EDuration="9.401492374s" podCreationTimestamp="2025-11-25 09:57:35 +0000 UTC" firstStartedPulling="2025-11-25 09:57:37.449362298 +0000 UTC m=+1263.302355674" lastFinishedPulling="2025-11-25 09:57:43.614768266 +0000 UTC m=+1269.467761642" observedRunningTime="2025-11-25 09:57:44.384852985 +0000 UTC m=+1270.237846361" watchObservedRunningTime="2025-11-25 09:57:44.401492374 +0000 UTC m=+1270.254485750" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.471216 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-sb\") pod \"8661757b-9fae-4808-9568-3997a0b0c7b6\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.471320 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-nb\") pod \"8661757b-9fae-4808-9568-3997a0b0c7b6\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.471358 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-config\") pod \"8661757b-9fae-4808-9568-3997a0b0c7b6\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.471443 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c6nh\" (UniqueName: \"kubernetes.io/projected/8661757b-9fae-4808-9568-3997a0b0c7b6-kube-api-access-9c6nh\") pod \"8661757b-9fae-4808-9568-3997a0b0c7b6\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.471468 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-dns-svc\") pod 
\"8661757b-9fae-4808-9568-3997a0b0c7b6\" (UID: \"8661757b-9fae-4808-9568-3997a0b0c7b6\") " Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.474877 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8661757b-9fae-4808-9568-3997a0b0c7b6-kube-api-access-9c6nh" (OuterVolumeSpecName: "kube-api-access-9c6nh") pod "8661757b-9fae-4808-9568-3997a0b0c7b6" (UID: "8661757b-9fae-4808-9568-3997a0b0c7b6"). InnerVolumeSpecName "kube-api-access-9c6nh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.521945 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8661757b-9fae-4808-9568-3997a0b0c7b6" (UID: "8661757b-9fae-4808-9568-3997a0b0c7b6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.523073 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-config" (OuterVolumeSpecName: "config") pod "8661757b-9fae-4808-9568-3997a0b0c7b6" (UID: "8661757b-9fae-4808-9568-3997a0b0c7b6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.523608 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8661757b-9fae-4808-9568-3997a0b0c7b6" (UID: "8661757b-9fae-4808-9568-3997a0b0c7b6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.537400 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8661757b-9fae-4808-9568-3997a0b0c7b6" (UID: "8661757b-9fae-4808-9568-3997a0b0c7b6"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.573865 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c6nh\" (UniqueName: \"kubernetes.io/projected/8661757b-9fae-4808-9568-3997a0b0c7b6-kube-api-access-9c6nh\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.573895 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.573905 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.573913 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:44 crc kubenswrapper[4854]: I1125 09:57:44.573923 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8661757b-9fae-4808-9568-3997a0b0c7b6-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:45 crc kubenswrapper[4854]: I1125 09:57:45.382061 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" event={"ID":"8661757b-9fae-4808-9568-3997a0b0c7b6","Type":"ContainerDied","Data":"0f5484dfda6089d7307a6ecdb0443326211d7ec97d86d18c56062bddcd888de1"} Nov 25 09:57:45 crc kubenswrapper[4854]: I1125 09:57:45.382427 4854 scope.go:117] "RemoveContainer" containerID="69aac727da93d49e87569df43f08d80deb5ad646479eb358200a163bc2cd1b6b" Nov 25 09:57:45 crc kubenswrapper[4854]: I1125 09:57:45.382115 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-qf5fg" Nov 25 09:57:45 crc kubenswrapper[4854]: I1125 09:57:45.419886 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-qf5fg"] Nov 25 09:57:45 crc kubenswrapper[4854]: I1125 09:57:45.421698 4854 scope.go:117] "RemoveContainer" containerID="043a0130e4f0235379b8dcddc49849f293b1e7e83623af4ffe6f52ed90563889" Nov 25 09:57:45 crc kubenswrapper[4854]: I1125 09:57:45.434431 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-qf5fg"] Nov 25 09:57:47 crc kubenswrapper[4854]: I1125 09:57:47.031728 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8661757b-9fae-4808-9568-3997a0b0c7b6" path="/var/lib/kubelet/pods/8661757b-9fae-4808-9568-3997a0b0c7b6/volumes" Nov 25 09:57:48 crc kubenswrapper[4854]: I1125 09:57:48.419601 4854 generic.go:334] "Generic (PLEG): container finished" podID="a253f619-24d4-453e-815f-c5301c77799c" containerID="62e81eaac63d5c569bae9bf10530fcb176395c37359dc28f068b0d5893662ef3" exitCode=0 Nov 25 09:57:48 crc kubenswrapper[4854]: I1125 09:57:48.419661 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4xfns" event={"ID":"a253f619-24d4-453e-815f-c5301c77799c","Type":"ContainerDied","Data":"62e81eaac63d5c569bae9bf10530fcb176395c37359dc28f068b0d5893662ef3"} Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.072272 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.189319 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-combined-ca-bundle\") pod \"a253f619-24d4-453e-815f-c5301c77799c\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.189523 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-config-data\") pod \"a253f619-24d4-453e-815f-c5301c77799c\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.189626 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pkgk\" (UniqueName: \"kubernetes.io/projected/a253f619-24d4-453e-815f-c5301c77799c-kube-api-access-6pkgk\") pod \"a253f619-24d4-453e-815f-c5301c77799c\" (UID: \"a253f619-24d4-453e-815f-c5301c77799c\") " Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.195939 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a253f619-24d4-453e-815f-c5301c77799c-kube-api-access-6pkgk" (OuterVolumeSpecName: "kube-api-access-6pkgk") pod "a253f619-24d4-453e-815f-c5301c77799c" (UID: "a253f619-24d4-453e-815f-c5301c77799c"). InnerVolumeSpecName "kube-api-access-6pkgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.215259 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a253f619-24d4-453e-815f-c5301c77799c" (UID: "a253f619-24d4-453e-815f-c5301c77799c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.235242 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-config-data" (OuterVolumeSpecName: "config-data") pod "a253f619-24d4-453e-815f-c5301c77799c" (UID: "a253f619-24d4-453e-815f-c5301c77799c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.291838 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.291875 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pkgk\" (UniqueName: \"kubernetes.io/projected/a253f619-24d4-453e-815f-c5301c77799c-kube-api-access-6pkgk\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.291886 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a253f619-24d4-453e-815f-c5301c77799c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.440680 4854 generic.go:334] "Generic (PLEG): container finished" podID="558820bb-fc66-444a-96d3-107dbc60fb3f" containerID="031e0ad54e70165472a3cf59a3cad7f235349b2cde4059cedb137c9df7812b7e" exitCode=0 Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.440736 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"558820bb-fc66-444a-96d3-107dbc60fb3f","Type":"ContainerDied","Data":"031e0ad54e70165472a3cf59a3cad7f235349b2cde4059cedb137c9df7812b7e"} Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.442570 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4xfns" event={"ID":"a253f619-24d4-453e-815f-c5301c77799c","Type":"ContainerDied","Data":"4d1e16e0e4b1fdb164657b3fc6e21ece8811114c8bbcdfb858ee4855ac6e7813"} Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.442610 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d1e16e0e4b1fdb164657b3fc6e21ece8811114c8bbcdfb858ee4855ac6e7813" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.442681 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-4xfns" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.642472 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-tsxsg"] Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643344 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerName="init" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643368 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerName="init" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643378 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6caae98-1807-47ad-a1b8-ddc1ff33b2d9" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643386 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6caae98-1807-47ad-a1b8-ddc1ff33b2d9" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643399 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643408 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643419 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7341835-d9f5-46b4-b382-364fab8df0ac" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643427 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7341835-d9f5-46b4-b382-364fab8df0ac" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643442 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48cb153a-315a-4e7b-adea-51a576cd48a6" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643450 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="48cb153a-315a-4e7b-adea-51a576cd48a6" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643475 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89ab6a6b-99ee-4388-92a6-d1e0bb469abc" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643483 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="89ab6a6b-99ee-4388-92a6-d1e0bb469abc" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643498 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerName="dnsmasq-dns" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643506 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerName="dnsmasq-dns" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643527 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a253f619-24d4-453e-815f-c5301c77799c" containerName="keystone-db-sync" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643534 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a253f619-24d4-453e-815f-c5301c77799c" containerName="keystone-db-sync" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643548 4854 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="dd430211-c49c-40f1-a776-de76234249eb" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643558 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd430211-c49c-40f1-a776-de76234249eb" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643583 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643595 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: E1125 09:57:50.643630 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf266cb3-661b-4fbd-8162-8da1268957be" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643639 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf266cb3-661b-4fbd-8162-8da1268957be" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643902 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643919 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7341835-d9f5-46b4-b382-364fab8df0ac" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643933 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="89ab6a6b-99ee-4388-92a6-d1e0bb469abc" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643948 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd430211-c49c-40f1-a776-de76234249eb" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643963 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="48cb153a-315a-4e7b-adea-51a576cd48a6" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643973 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.643989 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a253f619-24d4-453e-815f-c5301c77799c" containerName="keystone-db-sync" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.644005 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="8661757b-9fae-4808-9568-3997a0b0c7b6" containerName="dnsmasq-dns" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.644021 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6caae98-1807-47ad-a1b8-ddc1ff33b2d9" containerName="mariadb-account-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.644034 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf266cb3-661b-4fbd-8162-8da1268957be" containerName="mariadb-database-create" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.645479 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.665884 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-tsxsg"] Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.686038 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-fjbh7"] Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.688055 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.691860 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.692107 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.726170 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.728652 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wks95" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.735994 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.789377 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zwj4\" (UniqueName: \"kubernetes.io/projected/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-kube-api-access-7zwj4\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.789688 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-config-data\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.789809 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-scripts\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.789914 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.789989 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.790110 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-combined-ca-bundle\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.790181 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2jq5\" (UniqueName: \"kubernetes.io/projected/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-kube-api-access-n2jq5\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.790292 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-config\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.790374 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.790479 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-credential-keys\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.790607 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.790743 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-fernet-keys\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.824007 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fjbh7"] Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.892250 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-pfkjn"] Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.894005 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.895897 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-scripts\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.895945 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.895967 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896009 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-combined-ca-bundle\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896030 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2jq5\" (UniqueName: \"kubernetes.io/projected/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-kube-api-access-n2jq5\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896060 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-config\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896073 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896090 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-credential-keys\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896131 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 
09:57:50.896186 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-fernet-keys\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896238 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zwj4\" (UniqueName: \"kubernetes.io/projected/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-kube-api-access-7zwj4\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.896278 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-config-data\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.898221 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-config\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.898484 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.899232 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-g9249" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.899596 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.900707 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.903723 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.904500 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.915218 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-scripts\") pod 
\"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.923846 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-credential-keys\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.925557 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-fernet-keys\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.930273 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-combined-ca-bundle\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.931369 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-config-data\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.944097 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-pfkjn"] Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.954878 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zwj4\" (UniqueName: \"kubernetes.io/projected/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-kube-api-access-7zwj4\") pod \"dnsmasq-dns-bbf5cc879-tsxsg\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.962456 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2jq5\" (UniqueName: \"kubernetes.io/projected/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-kube-api-access-n2jq5\") pod \"keystone-bootstrap-fjbh7\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.986620 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.998477 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfd67\" (UniqueName: \"kubernetes.io/projected/ec8d5beb-439a-4921-a6b8-029331402149-kube-api-access-mfd67\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.998530 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-combined-ca-bundle\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:50 crc kubenswrapper[4854]: I1125 09:57:50.998567 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-config-data\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.080201 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-p594s"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.081996 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.082611 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.093361 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-x5m65" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.100430 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfd67\" (UniqueName: \"kubernetes.io/projected/ec8d5beb-439a-4921-a6b8-029331402149-kube-api-access-mfd67\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.100488 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-combined-ca-bundle\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.100532 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-config-data\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.102048 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.105640 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.118430 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-combined-ca-bundle\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.142983 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-p594s"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.144517 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-config-data\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.176400 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfd67\" (UniqueName: \"kubernetes.io/projected/ec8d5beb-439a-4921-a6b8-029331402149-kube-api-access-mfd67\") pod \"heat-db-sync-pfkjn\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") " pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.189085 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-5h66s"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.190772 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.193969 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.193997 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-z7f8l" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.194122 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.204931 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-db-sync-config-data\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.205038 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-combined-ca-bundle\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.205085 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-config-data\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.205137 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-scripts\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.205165 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f828059-1092-45cd-99a8-3915b6bab37f-etc-machine-id\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.205218 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4dnk\" (UniqueName: \"kubernetes.io/projected/4f828059-1092-45cd-99a8-3915b6bab37f-kube-api-access-l4dnk\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.209953 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-5h66s"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.233806 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-hkj5t"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.235955 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.238147 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-v8f9c" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.238405 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.247165 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-tsxsg"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.263110 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-hkj5t"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.300403 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-fd5ht"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.303812 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307118 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-combined-ca-bundle\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307209 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-scripts\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307246 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f828059-1092-45cd-99a8-3915b6bab37f-etc-machine-id\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307294 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-combined-ca-bundle\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307388 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4dnk\" (UniqueName: \"kubernetes.io/projected/4f828059-1092-45cd-99a8-3915b6bab37f-kube-api-access-l4dnk\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307434 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-config\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307467 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp84h\" (UniqueName: \"kubernetes.io/projected/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-kube-api-access-rp84h\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307488 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-db-sync-config-data\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307542 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-db-sync-config-data\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" 
Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307562 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x25l4\" (UniqueName: \"kubernetes.io/projected/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-kube-api-access-x25l4\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307697 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-combined-ca-bundle\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.307765 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-config-data\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.309824 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f828059-1092-45cd-99a8-3915b6bab37f-etc-machine-id\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.312303 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-5n9gc"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.313900 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.325185 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.325287 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.325469 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-q9wjs" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.328152 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-fd5ht"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.332160 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-pfkjn" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.348705 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-config-data\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.350190 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-db-sync-config-data\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.351214 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-scripts\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.352746 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-combined-ca-bundle\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.353710 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-5n9gc"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.358296 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4dnk\" (UniqueName: \"kubernetes.io/projected/4f828059-1092-45cd-99a8-3915b6bab37f-kube-api-access-l4dnk\") pod \"cinder-db-sync-p594s\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.409998 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410062 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-combined-ca-bundle\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410135 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-combined-ca-bundle\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410155 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-scripts\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " 
pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410174 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmplg\" (UniqueName: \"kubernetes.io/projected/4eaa166f-f908-46ac-87db-627ca7c64013-kube-api-access-mmplg\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410203 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410224 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-config\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410239 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410254 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-combined-ca-bundle\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410269 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33e7955b-280b-4907-a8ee-7fb1a46d6352-logs\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410292 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp84h\" (UniqueName: \"kubernetes.io/projected/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-kube-api-access-rp84h\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410311 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-db-sync-config-data\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410331 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-config\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " 
pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410356 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x25l4\" (UniqueName: \"kubernetes.io/projected/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-kube-api-access-x25l4\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410386 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nls2f\" (UniqueName: \"kubernetes.io/projected/33e7955b-280b-4907-a8ee-7fb1a46d6352-kube-api-access-nls2f\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410414 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.410443 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-config-data\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.414117 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-combined-ca-bundle\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.414959 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-db-sync-config-data\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.423016 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-config\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.423352 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-combined-ca-bundle\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.443375 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x25l4\" (UniqueName: \"kubernetes.io/projected/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-kube-api-access-x25l4\") pod \"barbican-db-sync-hkj5t\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 
09:57:51.448380 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp84h\" (UniqueName: \"kubernetes.io/projected/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-kube-api-access-rp84h\") pod \"neutron-db-sync-5h66s\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") " pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.484536 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-p594s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.509124 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"558820bb-fc66-444a-96d3-107dbc60fb3f","Type":"ContainerStarted","Data":"209e93625361dc88fc0a38c08f70043050f1f634c07d8c8ecf198a5933345c61"} Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.512777 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.512826 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-combined-ca-bundle\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.512882 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33e7955b-280b-4907-a8ee-7fb1a46d6352-logs\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.513643 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33e7955b-280b-4907-a8ee-7fb1a46d6352-logs\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.514199 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.514566 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-config\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.514657 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nls2f\" (UniqueName: \"kubernetes.io/projected/33e7955b-280b-4907-a8ee-7fb1a46d6352-kube-api-access-nls2f\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.514737 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.514798 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-config-data\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.514848 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.515054 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-scripts\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.515092 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmplg\" (UniqueName: \"kubernetes.io/projected/4eaa166f-f908-46ac-87db-627ca7c64013-kube-api-access-mmplg\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.515147 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.515941 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.517856 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.518364 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-combined-ca-bundle\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.520413 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.522197 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-config\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.526049 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-scripts\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.530277 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5h66s" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.532815 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-config-data\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.543028 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nls2f\" (UniqueName: \"kubernetes.io/projected/33e7955b-280b-4907-a8ee-7fb1a46d6352-kube-api-access-nls2f\") pod \"placement-db-sync-5n9gc\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.546459 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmplg\" (UniqueName: \"kubernetes.io/projected/4eaa166f-f908-46ac-87db-627ca7c64013-kube-api-access-mmplg\") pod \"dnsmasq-dns-56df8fb6b7-fd5ht\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.684410 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.792556 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5n9gc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.793186 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.853722 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fjbh7"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.893962 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.912559 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.916474 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.917062 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6k6vk" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.917185 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.919986 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 09:57:51 crc kubenswrapper[4854]: I1125 09:57:51.983341 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.031827 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.050263 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.050499 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6pjg\" (UniqueName: \"kubernetes.io/projected/d50edb50-d2f3-4018-88d8-93899801fa1b-kube-api-access-d6pjg\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.050876 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.051100 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.051208 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-config-data\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.051424 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-logs\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: 
I1125 09:57:52.051541 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.051685 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-scripts\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.056737 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.062029 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.062336 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.132599 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.160959 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.161009 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-config-data\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.161096 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-logs\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.161121 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.161151 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-scripts\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.161197 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.161217 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6pjg\" (UniqueName: \"kubernetes.io/projected/d50edb50-d2f3-4018-88d8-93899801fa1b-kube-api-access-d6pjg\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.161265 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.162310 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.164177 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.171664 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.173941 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-logs\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.184835 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-config-data\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.184857 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-scripts\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.185799 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.188081 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6pjg\" (UniqueName: \"kubernetes.io/projected/d50edb50-d2f3-4018-88d8-93899801fa1b-kube-api-access-d6pjg\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.211744 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.216518 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.217521 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.218897 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.219826 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.230451 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.245714 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-tsxsg"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.264239 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-logs\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.264593 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-scripts\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.264664 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-config-data\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.264803 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.264860 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-sj425\" (UniqueName: \"kubernetes.io/projected/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-kube-api-access-sj425\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.264916 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.265027 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.265063 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: W1125 09:57:52.277719 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c746cab_609e_45d1_b9ff_b5b3a9b1d3b7.slice/crio-4a63e3e83d0131039f73f144b504083a60799ee106e9cbfba12fb411a661ac4a WatchSource:0}: Error finding container 4a63e3e83d0131039f73f144b504083a60799ee106e9cbfba12fb411a661ac4a: Status 404 returned error can't find the container with id 4a63e3e83d0131039f73f144b504083a60799ee106e9cbfba12fb411a661ac4a Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.283000 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366591 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-logs\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366688 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-scripts\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366724 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-config-data\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366790 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-config-data\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366854 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-run-httpd\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366892 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366930 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-scripts\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.366964 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj425\" (UniqueName: \"kubernetes.io/projected/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-kube-api-access-sj425\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.367018 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.367088 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lflqq\" (UniqueName: \"kubernetes.io/projected/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-kube-api-access-lflqq\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.367122 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.367156 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.367179 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-log-httpd\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.367231 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.367257 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.368942 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-logs\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.371091 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.374377 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.375004 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.378972 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-scripts\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.379505 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-config-data\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.380834 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.388515 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj425\" (UniqueName: \"kubernetes.io/projected/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-kube-api-access-sj425\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.432892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.468931 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-run-httpd\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.469187 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-scripts\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.469411 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lflqq\" (UniqueName: \"kubernetes.io/projected/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-kube-api-access-lflqq\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.469513 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: 
I1125 09:57:52.469624 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.469999 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-log-httpd\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.470183 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-config-data\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.472635 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-pfkjn"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.469719 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-run-httpd\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.473137 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-log-httpd\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.484692 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.484978 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-scripts\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.504074 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-config-data\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.504398 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.545162 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" event={"ID":"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7","Type":"ContainerStarted","Data":"4a63e3e83d0131039f73f144b504083a60799ee106e9cbfba12fb411a661ac4a"} Nov 25 
09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.559040 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fjbh7" event={"ID":"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb","Type":"ContainerStarted","Data":"411b6bfbaca9f65a812d278560ed5f74baeec115d880b9e83990567da973e0e5"} Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.560224 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.562864 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pfkjn" event={"ID":"ec8d5beb-439a-4921-a6b8-029331402149","Type":"ContainerStarted","Data":"eefa3086f4d38fdb1c72755e17b4ef8c15f93dfaa5e3f6662950163c7d8bf538"} Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.579952 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lflqq\" (UniqueName: \"kubernetes.io/projected/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-kube-api-access-lflqq\") pod \"ceilometer-0\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") " pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.580834 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.792991 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-5h66s"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.826637 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-p594s"] Nov 25 09:57:52 crc kubenswrapper[4854]: I1125 09:57:52.874232 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-hkj5t"] Nov 25 09:57:52 crc kubenswrapper[4854]: W1125 09:57:52.897022 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f828059_1092_45cd_99a8_3915b6bab37f.slice/crio-5717fdbad0c1b64a48246b113f43120482ab1e3ff446dac18ab02d6733402b08 WatchSource:0}: Error finding container 5717fdbad0c1b64a48246b113f43120482ab1e3ff446dac18ab02d6733402b08: Status 404 returned error can't find the container with id 5717fdbad0c1b64a48246b113f43120482ab1e3ff446dac18ab02d6733402b08 Nov 25 09:57:53 crc kubenswrapper[4854]: W1125 09:57:52.990214 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70897159_9d6f_44cc_9b46_5f6a5d18fd8b.slice/crio-78193695004fd85e472cf058eed7bc609224e1b5671d71c1309243dd4910f2aa WatchSource:0}: Error finding container 78193695004fd85e472cf058eed7bc609224e1b5671d71c1309243dd4910f2aa: Status 404 returned error can't find the container with id 78193695004fd85e472cf058eed7bc609224e1b5671d71c1309243dd4910f2aa Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.151974 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.221658 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.325642 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-5n9gc"] Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.354231 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-fd5ht"] Nov 25 09:57:53 crc 
kubenswrapper[4854]: I1125 09:57:53.466917 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.513739 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.545271 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.597427 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.599356 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fjbh7" event={"ID":"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb","Type":"ContainerStarted","Data":"e95499c001880a2f7439b1ed89af334beb9764b8371e6cc762af35f32f369db2"} Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.618298 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5n9gc" event={"ID":"33e7955b-280b-4907-a8ee-7fb1a46d6352","Type":"ContainerStarted","Data":"55f14d76d4c0b7d195763aa2a8e5d550c9c4d401cf6262cccf1d9f826103ec61"} Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.627743 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-fjbh7" podStartSLOduration=3.627725564 podStartE2EDuration="3.627725564s" podCreationTimestamp="2025-11-25 09:57:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:53.626231292 +0000 UTC m=+1279.479224668" watchObservedRunningTime="2025-11-25 09:57:53.627725564 +0000 UTC m=+1279.480718940" Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.627980 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hkj5t" event={"ID":"70897159-9d6f-44cc-9b46-5f6a5d18fd8b","Type":"ContainerStarted","Data":"78193695004fd85e472cf058eed7bc609224e1b5671d71c1309243dd4910f2aa"} Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.630749 4854 generic.go:334] "Generic (PLEG): container finished" podID="3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" containerID="149a8010f10a643ed856b0bf2e96dcb9b81b9ff0b64236def76849c7ba01231b" exitCode=0 Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.631116 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" event={"ID":"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7","Type":"ContainerDied","Data":"149a8010f10a643ed856b0bf2e96dcb9b81b9ff0b64236def76849c7ba01231b"} Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.634370 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5h66s" event={"ID":"816b6c7b-9d88-412e-8e20-5630cc8fd4a9","Type":"ContainerStarted","Data":"72bcdd2ad4b7aad7546d1ea3fecc2825bda956c47cfb3fdbd679da672fc8a442"} Nov 25 09:57:53 crc kubenswrapper[4854]: I1125 09:57:53.636429 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p594s" event={"ID":"4f828059-1092-45cd-99a8-3915b6bab37f","Type":"ContainerStarted","Data":"5717fdbad0c1b64a48246b113f43120482ab1e3ff446dac18ab02d6733402b08"} Nov 25 09:57:53 crc kubenswrapper[4854]: W1125 09:57:53.948931 4854 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43fa4f31_2561_4dd1_8f31_5b7ed7b2b636.slice/crio-9794a4f48a503d7fee6497e065156f2ee8800401214014e3510f32bcb3d4e2cc WatchSource:0}: Error finding container 9794a4f48a503d7fee6497e065156f2ee8800401214014e3510f32bcb3d4e2cc: Status 404 returned error can't find the container with id 9794a4f48a503d7fee6497e065156f2ee8800401214014e3510f32bcb3d4e2cc Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.594887 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.658805 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerStarted","Data":"3240676e5231a534e34674f7c695edeaa160de2a286121f336f26ab8756b59d3"} Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.662393 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d50edb50-d2f3-4018-88d8-93899801fa1b","Type":"ContainerStarted","Data":"1a958893fd967a0a36c7463c5a05a427f6d6bd1807dc52d6aa75d97d87e3ac57"} Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.664181 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" event={"ID":"4eaa166f-f908-46ac-87db-627ca7c64013","Type":"ContainerStarted","Data":"ae9e0c32040cdafac834137a146ab6404c86bdaa417c1b46906ea728ebf03a8c"} Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.668954 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" event={"ID":"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7","Type":"ContainerDied","Data":"4a63e3e83d0131039f73f144b504083a60799ee106e9cbfba12fb411a661ac4a"} Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.668998 4854 scope.go:117] "RemoveContainer" containerID="149a8010f10a643ed856b0bf2e96dcb9b81b9ff0b64236def76849c7ba01231b" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.669118 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.676578 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636","Type":"ContainerStarted","Data":"9794a4f48a503d7fee6497e065156f2ee8800401214014e3510f32bcb3d4e2cc"} Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.756363 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-svc\") pod \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.756691 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zwj4\" (UniqueName: \"kubernetes.io/projected/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-kube-api-access-7zwj4\") pod \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.756709 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-nb\") pod \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.756794 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-config\") pod \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.756855 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-sb\") pod \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.756953 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-swift-storage-0\") pod \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\" (UID: \"3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7\") " Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.782865 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-kube-api-access-7zwj4" (OuterVolumeSpecName: "kube-api-access-7zwj4") pod "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" (UID: "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7"). InnerVolumeSpecName "kube-api-access-7zwj4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.817588 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" (UID: "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.836072 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" (UID: "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.842627 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-config" (OuterVolumeSpecName: "config") pod "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" (UID: "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.843017 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" (UID: "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.859153 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.859186 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zwj4\" (UniqueName: \"kubernetes.io/projected/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-kube-api-access-7zwj4\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.859198 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.859207 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.859216 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.878235 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" (UID: "3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:57:54 crc kubenswrapper[4854]: I1125 09:57:54.962149 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.701171 4854 generic.go:334] "Generic (PLEG): container finished" podID="4eaa166f-f908-46ac-87db-627ca7c64013" containerID="9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8" exitCode=0 Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.701289 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" event={"ID":"4eaa166f-f908-46ac-87db-627ca7c64013","Type":"ContainerDied","Data":"9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8"} Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.738571 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"558820bb-fc66-444a-96d3-107dbc60fb3f","Type":"ContainerStarted","Data":"da799ec478ac92b5c707516eb6d5458beb959cc552f0e4a283cdb37c3854b066"} Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.738990 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"558820bb-fc66-444a-96d3-107dbc60fb3f","Type":"ContainerStarted","Data":"94ba241935fa2c198b7f28eaa3f45d8c867d44520e47a201018bcb58546206c7"} Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.743289 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5h66s" event={"ID":"816b6c7b-9d88-412e-8e20-5630cc8fd4a9","Type":"ContainerStarted","Data":"4c74dced21ec183c10db63707e93a19cdc6f123ef3543bba8f2236f96fd253c2"} Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.747762 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636","Type":"ContainerStarted","Data":"9e6ad237973c08ff37d1d0eb4465fae58e0f80f65e08334f009d0597e41e0d81"} Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.791367 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.791341326 podStartE2EDuration="18.791341326s" podCreationTimestamp="2025-11-25 09:57:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:55.768174789 +0000 UTC m=+1281.621168185" watchObservedRunningTime="2025-11-25 09:57:55.791341326 +0000 UTC m=+1281.644334712" Nov 25 09:57:55 crc kubenswrapper[4854]: I1125 09:57:55.855977 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-5h66s" podStartSLOduration=4.855938884 podStartE2EDuration="4.855938884s" podCreationTimestamp="2025-11-25 09:57:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:55.796006015 +0000 UTC m=+1281.648999411" watchObservedRunningTime="2025-11-25 09:57:55.855938884 +0000 UTC m=+1281.708932260" Nov 25 09:57:56 crc kubenswrapper[4854]: I1125 09:57:56.781554 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"d50edb50-d2f3-4018-88d8-93899801fa1b","Type":"ContainerStarted","Data":"a9afcd7b2d3f6360a8ee1c1ca4d0eba3396615064f65af612fb547a3d6dac279"} Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.794709 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d50edb50-d2f3-4018-88d8-93899801fa1b","Type":"ContainerStarted","Data":"da9a2cf24a5a3140cb0cbeaf943978f3cce742dc2e3a927f854f0c00854f614a"} Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.794939 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-httpd" containerID="cri-o://da9a2cf24a5a3140cb0cbeaf943978f3cce742dc2e3a927f854f0c00854f614a" gracePeriod=30 Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.794926 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-log" containerID="cri-o://a9afcd7b2d3f6360a8ee1c1ca4d0eba3396615064f65af612fb547a3d6dac279" gracePeriod=30 Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.799586 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" event={"ID":"4eaa166f-f908-46ac-87db-627ca7c64013","Type":"ContainerStarted","Data":"fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3"} Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.800646 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.805348 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636","Type":"ContainerStarted","Data":"634d753e1742d50264ed245259940ebcc600fb54ba9e79978960ef6fdd4e6b43"} Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.805545 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-httpd" containerID="cri-o://634d753e1742d50264ed245259940ebcc600fb54ba9e79978960ef6fdd4e6b43" gracePeriod=30 Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.805630 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-log" containerID="cri-o://9e6ad237973c08ff37d1d0eb4465fae58e0f80f65e08334f009d0597e41e0d81" gracePeriod=30 Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.832885 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.832859247 podStartE2EDuration="7.832859247s" podCreationTimestamp="2025-11-25 09:57:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:57.823022006 +0000 UTC m=+1283.676015402" watchObservedRunningTime="2025-11-25 09:57:57.832859247 +0000 UTC m=+1283.685852633" Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.860090 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.860063137 podStartE2EDuration="7.860063137s" 
podCreationTimestamp="2025-11-25 09:57:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:57.849776323 +0000 UTC m=+1283.702769719" watchObservedRunningTime="2025-11-25 09:57:57.860063137 +0000 UTC m=+1283.713056513" Nov 25 09:57:57 crc kubenswrapper[4854]: I1125 09:57:57.877934 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" podStartSLOduration=6.877911208 podStartE2EDuration="6.877911208s" podCreationTimestamp="2025-11-25 09:57:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:57:57.875353117 +0000 UTC m=+1283.728346503" watchObservedRunningTime="2025-11-25 09:57:57.877911208 +0000 UTC m=+1283.730904584" Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.189778 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.821802 4854 generic.go:334] "Generic (PLEG): container finished" podID="9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" containerID="e95499c001880a2f7439b1ed89af334beb9764b8371e6cc762af35f32f369db2" exitCode=0 Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.821896 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fjbh7" event={"ID":"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb","Type":"ContainerDied","Data":"e95499c001880a2f7439b1ed89af334beb9764b8371e6cc762af35f32f369db2"} Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.827141 4854 generic.go:334] "Generic (PLEG): container finished" podID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerID="da9a2cf24a5a3140cb0cbeaf943978f3cce742dc2e3a927f854f0c00854f614a" exitCode=0 Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.827178 4854 generic.go:334] "Generic (PLEG): container finished" podID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerID="a9afcd7b2d3f6360a8ee1c1ca4d0eba3396615064f65af612fb547a3d6dac279" exitCode=143 Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.827235 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d50edb50-d2f3-4018-88d8-93899801fa1b","Type":"ContainerDied","Data":"da9a2cf24a5a3140cb0cbeaf943978f3cce742dc2e3a927f854f0c00854f614a"} Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.827291 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d50edb50-d2f3-4018-88d8-93899801fa1b","Type":"ContainerDied","Data":"a9afcd7b2d3f6360a8ee1c1ca4d0eba3396615064f65af612fb547a3d6dac279"} Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.832703 4854 generic.go:334] "Generic (PLEG): container finished" podID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerID="634d753e1742d50264ed245259940ebcc600fb54ba9e79978960ef6fdd4e6b43" exitCode=0 Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.832786 4854 generic.go:334] "Generic (PLEG): container finished" podID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerID="9e6ad237973c08ff37d1d0eb4465fae58e0f80f65e08334f009d0597e41e0d81" exitCode=143 Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.832958 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636","Type":"ContainerDied","Data":"634d753e1742d50264ed245259940ebcc600fb54ba9e79978960ef6fdd4e6b43"} Nov 25 09:57:58 crc kubenswrapper[4854]: I1125 09:57:58.833002 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636","Type":"ContainerDied","Data":"9e6ad237973c08ff37d1d0eb4465fae58e0f80f65e08334f009d0597e41e0d81"} Nov 25 09:58:01 crc kubenswrapper[4854]: I1125 09:58:01.798020 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:58:01 crc kubenswrapper[4854]: I1125 09:58:01.896962 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-5fq4s"] Nov 25 09:58:01 crc kubenswrapper[4854]: I1125 09:58:01.902446 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" containerID="cri-o://660d363b72322a4efe63f9d699e3155dc3aac15c0223fd287c9539a455968e41" gracePeriod=10 Nov 25 09:58:02 crc kubenswrapper[4854]: I1125 09:58:02.932414 4854 generic.go:334] "Generic (PLEG): container finished" podID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerID="660d363b72322a4efe63f9d699e3155dc3aac15c0223fd287c9539a455968e41" exitCode=0 Nov 25 09:58:02 crc kubenswrapper[4854]: I1125 09:58:02.932650 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" event={"ID":"51aab4f1-d50b-47ba-b45d-e820d83ba125","Type":"ContainerDied","Data":"660d363b72322a4efe63f9d699e3155dc3aac15c0223fd287c9539a455968e41"} Nov 25 09:58:03 crc kubenswrapper[4854]: I1125 09:58:03.775450 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.166:5353: connect: connection refused" Nov 25 09:58:08 crc kubenswrapper[4854]: I1125 09:58:08.189667 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 25 09:58:08 crc kubenswrapper[4854]: I1125 09:58:08.195901 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 25 09:58:08 crc kubenswrapper[4854]: E1125 09:58:08.716339 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 25 09:58:08 crc kubenswrapper[4854]: E1125 09:58:08.716548 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x25l4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-hkj5t_openstack(70897159-9d6f-44cc-9b46-5f6a5d18fd8b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:58:08 crc kubenswrapper[4854]: E1125 09:58:08.717770 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-hkj5t" podUID="70897159-9d6f-44cc-9b46-5f6a5d18fd8b" Nov 25 09:58:09 crc kubenswrapper[4854]: E1125 09:58:09.020393 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-hkj5t" podUID="70897159-9d6f-44cc-9b46-5f6a5d18fd8b" Nov 25 09:58:09 crc kubenswrapper[4854]: I1125 09:58:09.050375 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 25 09:58:13 crc kubenswrapper[4854]: I1125 09:58:13.775967 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.166:5353: i/o timeout" Nov 25 09:58:18 crc kubenswrapper[4854]: I1125 09:58:18.780349 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.166:5353: i/o timeout" Nov 25 09:58:18 crc kubenswrapper[4854]: I1125 09:58:18.781351 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.755546 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.764563 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.769834 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.817947 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-swift-storage-0\") pod \"51aab4f1-d50b-47ba-b45d-e820d83ba125\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.817997 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-config-data\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.818104 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-nb\") pod \"51aab4f1-d50b-47ba-b45d-e820d83ba125\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.818162 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.818772 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-combined-ca-bundle\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.818963 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxplr\" (UniqueName: \"kubernetes.io/projected/51aab4f1-d50b-47ba-b45d-e820d83ba125-kube-api-access-dxplr\") pod \"51aab4f1-d50b-47ba-b45d-e820d83ba125\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.819101 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2jq5\" (UniqueName: \"kubernetes.io/projected/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-kube-api-access-n2jq5\") pod \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.819558 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-httpd-run\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.819702 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-config\") pod \"51aab4f1-d50b-47ba-b45d-e820d83ba125\" 
(UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.820179 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-logs\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.820318 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-sb\") pod \"51aab4f1-d50b-47ba-b45d-e820d83ba125\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.820412 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-scripts\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.820555 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-config-data\") pod \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.820781 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-fernet-keys\") pod \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.821308 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-combined-ca-bundle\") pod \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.821462 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sj425\" (UniqueName: \"kubernetes.io/projected/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-kube-api-access-sj425\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.821620 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-scripts\") pod \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.822100 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-internal-tls-certs\") pod \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\" (UID: \"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.822142 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-credential-keys\") pod \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\" (UID: \"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb\") " Nov 25 
09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.822207 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-svc\") pod \"51aab4f1-d50b-47ba-b45d-e820d83ba125\" (UID: \"51aab4f1-d50b-47ba-b45d-e820d83ba125\") " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.823898 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.829600 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.830413 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-logs" (OuterVolumeSpecName: "logs") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.837283 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-scripts" (OuterVolumeSpecName: "scripts") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.840226 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-scripts" (OuterVolumeSpecName: "scripts") pod "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" (UID: "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.850398 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-kube-api-access-n2jq5" (OuterVolumeSpecName: "kube-api-access-n2jq5") pod "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" (UID: "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb"). InnerVolumeSpecName "kube-api-access-n2jq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.872091 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.874034 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" (UID: "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.874080 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" (UID: "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.874327 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51aab4f1-d50b-47ba-b45d-e820d83ba125-kube-api-access-dxplr" (OuterVolumeSpecName: "kube-api-access-dxplr") pod "51aab4f1-d50b-47ba-b45d-e820d83ba125" (UID: "51aab4f1-d50b-47ba-b45d-e820d83ba125"). InnerVolumeSpecName "kube-api-access-dxplr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875243 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875270 4854 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875303 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875319 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875334 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxplr\" (UniqueName: \"kubernetes.io/projected/51aab4f1-d50b-47ba-b45d-e820d83ba125-kube-api-access-dxplr\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875346 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2jq5\" (UniqueName: \"kubernetes.io/projected/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-kube-api-access-n2jq5\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875358 4854 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875369 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 
09:58:20.875380 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.875391 4854 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.877665 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-kube-api-access-sj425" (OuterVolumeSpecName: "kube-api-access-sj425") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "kube-api-access-sj425". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.882331 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-config-data" (OuterVolumeSpecName: "config-data") pod "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" (UID: "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.889809 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" (UID: "9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.901451 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "51aab4f1-d50b-47ba-b45d-e820d83ba125" (UID: "51aab4f1-d50b-47ba-b45d-e820d83ba125"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.911468 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-config-data" (OuterVolumeSpecName: "config-data") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.916594 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "51aab4f1-d50b-47ba-b45d-e820d83ba125" (UID: "51aab4f1-d50b-47ba-b45d-e820d83ba125"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.930037 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.950782 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "51aab4f1-d50b-47ba-b45d-e820d83ba125" (UID: "51aab4f1-d50b-47ba-b45d-e820d83ba125"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990292 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990326 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sj425\" (UniqueName: \"kubernetes.io/projected/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-kube-api-access-sj425\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990341 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990354 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990365 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990378 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990389 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.990400 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.991191 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" (UID: "43fa4f31-2561-4dd1-8f31-5b7ed7b2b636"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:20 crc kubenswrapper[4854]: I1125 09:58:20.996499 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "51aab4f1-d50b-47ba-b45d-e820d83ba125" (UID: "51aab4f1-d50b-47ba-b45d-e820d83ba125"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.013231 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-config" (OuterVolumeSpecName: "config") pod "51aab4f1-d50b-47ba-b45d-e820d83ba125" (UID: "51aab4f1-d50b-47ba-b45d-e820d83ba125"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.094643 4854 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.094705 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.094715 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51aab4f1-d50b-47ba-b45d-e820d83ba125-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.161196 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.161189 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"43fa4f31-2561-4dd1-8f31-5b7ed7b2b636","Type":"ContainerDied","Data":"9794a4f48a503d7fee6497e065156f2ee8800401214014e3510f32bcb3d4e2cc"} Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.161600 4854 scope.go:117] "RemoveContainer" containerID="634d753e1742d50264ed245259940ebcc600fb54ba9e79978960ef6fdd4e6b43" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.166443 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fjbh7" event={"ID":"9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb","Type":"ContainerDied","Data":"411b6bfbaca9f65a812d278560ed5f74baeec115d880b9e83990567da973e0e5"} Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.166554 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="411b6bfbaca9f65a812d278560ed5f74baeec115d880b9e83990567da973e0e5" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.166706 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fjbh7" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.170662 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" event={"ID":"51aab4f1-d50b-47ba-b45d-e820d83ba125","Type":"ContainerDied","Data":"768e901204c46b46dee173f6e2ac6661786f452acff98cd6e4b591f87c155e6e"} Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.170747 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.220245 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.233635 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.248769 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-5fq4s"] Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.263329 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-5fq4s"] Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.275230 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.275837 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.275852 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.275872 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-log" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.275879 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-log" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.275914 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="init" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.275921 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="init" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.275946 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-httpd" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.275953 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-httpd" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.275975 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" containerName="init" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.275982 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" containerName="init" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.276000 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" containerName="keystone-bootstrap" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.276008 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" containerName="keystone-bootstrap" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.276253 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" containerName="keystone-bootstrap" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.276273 4854 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-httpd" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.276290 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" containerName="glance-log" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.276303 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.276312 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" containerName="init" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.278038 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.280532 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.283590 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.288236 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402311 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402381 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-logs\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402407 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402490 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402528 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402553 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdv7s\" 
(UniqueName: \"kubernetes.io/projected/9291c61a-5095-4ccb-a6a0-e1e618bfb501-kube-api-access-qdv7s\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402578 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.402658 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.473764 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.474073 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.474276 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n58dh657h56ch54ch68dh58ch576h569h646h64bh667hdbhbh674h575h5c5h5f8h675h59h5f8h667h587hd8hbch66h58chcch6fh687h65bh5fh5d4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lflqq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c7153c0a-3527-49ba-a5f2-0b6f5b1b219d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.505930 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506139 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506195 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-logs\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506227 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506362 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506422 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506444 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdv7s\" (UniqueName: \"kubernetes.io/projected/9291c61a-5095-4ccb-a6a0-e1e618bfb501-kube-api-access-qdv7s\") pod 
\"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506470 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506363 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.506824 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-logs\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.507584 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.511127 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.511127 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.512071 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.514980 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.524628 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdv7s\" (UniqueName: \"kubernetes.io/projected/9291c61a-5095-4ccb-a6a0-e1e618bfb501-kube-api-access-qdv7s\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " 
pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.547882 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.604258 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.607938 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-logs\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.607991 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-combined-ca-bundle\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608159 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6pjg\" (UniqueName: \"kubernetes.io/projected/d50edb50-d2f3-4018-88d8-93899801fa1b-kube-api-access-d6pjg\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608184 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-public-tls-certs\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608223 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608267 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-scripts\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608282 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-httpd-run\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608352 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-config-data\") pod \"d50edb50-d2f3-4018-88d8-93899801fa1b\" (UID: \"d50edb50-d2f3-4018-88d8-93899801fa1b\") " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608394 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-logs" (OuterVolumeSpecName: "logs") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608784 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.608833 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.611381 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.612665 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d50edb50-d2f3-4018-88d8-93899801fa1b-kube-api-access-d6pjg" (OuterVolumeSpecName: "kube-api-access-d6pjg") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "kube-api-access-d6pjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.612873 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-scripts" (OuterVolumeSpecName: "scripts") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.640382 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.691009 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.711715 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.711752 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6pjg\" (UniqueName: \"kubernetes.io/projected/d50edb50-d2f3-4018-88d8-93899801fa1b-kube-api-access-d6pjg\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.711767 4854 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.711814 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.711827 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.711885 4854 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d50edb50-d2f3-4018-88d8-93899801fa1b-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.717817 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-config-data" (OuterVolumeSpecName: "config-data") pod "d50edb50-d2f3-4018-88d8-93899801fa1b" (UID: "d50edb50-d2f3-4018-88d8-93899801fa1b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.744606 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.813571 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.813944 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d50edb50-d2f3-4018-88d8-93899801fa1b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.858414 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-fjbh7"] Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.871049 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-fjbh7"] Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.958499 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-h58qt"] Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.959158 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-httpd" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.959183 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-httpd" Nov 25 09:58:21 crc kubenswrapper[4854]: E1125 09:58:21.959211 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-log" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.959221 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-log" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.959440 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-log" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.959462 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" containerName="glance-httpd" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.960246 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.963853 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.963937 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.964005 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wks95" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.964196 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.964344 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:58:21 crc kubenswrapper[4854]: I1125 09:58:21.973919 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-h58qt"] Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.124117 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-config-data\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.124243 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-fernet-keys\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.124320 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4hdx\" (UniqueName: \"kubernetes.io/projected/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-kube-api-access-f4hdx\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.124414 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-combined-ca-bundle\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.124442 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-credential-keys\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.124843 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-scripts\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.183215 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"d50edb50-d2f3-4018-88d8-93899801fa1b","Type":"ContainerDied","Data":"1a958893fd967a0a36c7463c5a05a427f6d6bd1807dc52d6aa75d97d87e3ac57"} Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.183299 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.229245 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-config-data\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.229334 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-fernet-keys\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.229363 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4hdx\" (UniqueName: \"kubernetes.io/projected/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-kube-api-access-f4hdx\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.229412 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-combined-ca-bundle\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.229436 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-credential-keys\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.229554 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-scripts\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.231969 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.241585 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-scripts\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.242210 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-combined-ca-bundle\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt" 
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.243038 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-config-data\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.244908 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-credential-keys\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.245395 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-fernet-keys\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.257803 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.265233 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4hdx\" (UniqueName: \"kubernetes.io/projected/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-kube-api-access-f4hdx\") pod \"keystone-bootstrap-h58qt\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " pod="openstack/keystone-bootstrap-h58qt"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.276733 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.278368 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.282266 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.282438 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.295975 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.327016 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-h58qt"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.433649 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r6j2\" (UniqueName: \"kubernetes.io/projected/c67dbcea-a3b9-46ac-833c-97595c61756e-kube-api-access-6r6j2\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.433715 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-scripts\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.433952 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.434053 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-config-data\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.434376 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.434486 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.434555 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.434625 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-logs\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536029 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-config-data\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536129 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536161 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536186 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536207 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-logs\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536606 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r6j2\" (UniqueName: \"kubernetes.io/projected/c67dbcea-a3b9-46ac-833c-97595c61756e-kube-api-access-6r6j2\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536632 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-scripts\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536643 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536703 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536757 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.536813 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-logs\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.540433 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-scripts\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.540725 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.541295 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-config-data\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.543163 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.565061 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r6j2\" (UniqueName: \"kubernetes.io/projected/c67dbcea-a3b9-46ac-833c-97595c61756e-kube-api-access-6r6j2\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.575630 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.667427 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.866817 4854 scope.go:117] "RemoveContainer" containerID="9e6ad237973c08ff37d1d0eb4465fae58e0f80f65e08334f009d0597e41e0d81"
Nov 25 09:58:22 crc kubenswrapper[4854]: E1125 09:58:22.909365 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Nov 25 09:58:22 crc kubenswrapper[4854]: E1125 09:58:22.909511 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l4dnk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-p594s_openstack(4f828059-1092-45cd-99a8-3915b6bab37f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 09:58:22 crc kubenswrapper[4854]: E1125 09:58:22.911518 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-p594s"
podUID="4f828059-1092-45cd-99a8-3915b6bab37f" Nov 25 09:58:22 crc kubenswrapper[4854]: I1125 09:58:22.945661 4854 scope.go:117] "RemoveContainer" containerID="660d363b72322a4efe63f9d699e3155dc3aac15c0223fd287c9539a455968e41" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.111256 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43fa4f31-2561-4dd1-8f31-5b7ed7b2b636" path="/var/lib/kubelet/pods/43fa4f31-2561-4dd1-8f31-5b7ed7b2b636/volumes" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.112551 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" path="/var/lib/kubelet/pods/51aab4f1-d50b-47ba-b45d-e820d83ba125/volumes" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.113248 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb" path="/var/lib/kubelet/pods/9513f35b-3be6-40f4-82cf-e1fb7fdaf5fb/volumes" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.116962 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d50edb50-d2f3-4018-88d8-93899801fa1b" path="/var/lib/kubelet/pods/d50edb50-d2f3-4018-88d8-93899801fa1b/volumes" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.144637 4854 scope.go:117] "RemoveContainer" containerID="a9e7cd7e080eeec6a6f18285c55c32226cf2e4b168a5d18ed7b223a66a158f83" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.187113 4854 scope.go:117] "RemoveContainer" containerID="da9a2cf24a5a3140cb0cbeaf943978f3cce742dc2e3a927f854f0c00854f614a" Nov 25 09:58:23 crc kubenswrapper[4854]: E1125 09:58:23.208019 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-p594s" podUID="4f828059-1092-45cd-99a8-3915b6bab37f" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.245967 4854 scope.go:117] "RemoveContainer" containerID="a9afcd7b2d3f6360a8ee1c1ca4d0eba3396615064f65af612fb547a3d6dac279" Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.501538 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-h58qt"] Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.775988 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:58:23 crc kubenswrapper[4854]: I1125 09:58:23.784154 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-5fq4s" podUID="51aab4f1-d50b-47ba-b45d-e820d83ba125" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.166:5353: i/o timeout" Nov 25 09:58:23 crc kubenswrapper[4854]: W1125 09:58:23.954018 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc67dbcea_a3b9_46ac_833c_97595c61756e.slice/crio-268ac2fd85d2bf853f42fb82f17f834cb4c6cfd52bc5afb8e54cc00830e62329 WatchSource:0}: Error finding container 268ac2fd85d2bf853f42fb82f17f834cb4c6cfd52bc5afb8e54cc00830e62329: Status 404 returned error can't find the container with id 268ac2fd85d2bf853f42fb82f17f834cb4c6cfd52bc5afb8e54cc00830e62329 Nov 25 09:58:23 crc kubenswrapper[4854]: W1125 09:58:23.956601 4854 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3bd0c35d_5f7c_47aa_bdf3_5d5e7ff61c98.slice/crio-8b95ed172f66873363ec7441a58615fd7caa65d962123e0877b89533aa2603ba WatchSource:0}: Error finding container 8b95ed172f66873363ec7441a58615fd7caa65d962123e0877b89533aa2603ba: Status 404 returned error can't find the container with id 8b95ed172f66873363ec7441a58615fd7caa65d962123e0877b89533aa2603ba Nov 25 09:58:24 crc kubenswrapper[4854]: I1125 09:58:24.255935 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c67dbcea-a3b9-46ac-833c-97595c61756e","Type":"ContainerStarted","Data":"268ac2fd85d2bf853f42fb82f17f834cb4c6cfd52bc5afb8e54cc00830e62329"} Nov 25 09:58:24 crc kubenswrapper[4854]: I1125 09:58:24.262006 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pfkjn" event={"ID":"ec8d5beb-439a-4921-a6b8-029331402149","Type":"ContainerStarted","Data":"1228aff76ce4e075c53c6f5d77ee2da25523328069aa7ab95cf151dc04c7c6f7"} Nov 25 09:58:24 crc kubenswrapper[4854]: I1125 09:58:24.264465 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5n9gc" event={"ID":"33e7955b-280b-4907-a8ee-7fb1a46d6352","Type":"ContainerStarted","Data":"e644f8b37a3dd8708617101f297f3fb6fe3734660cd125b9a812de9c4e28cf78"} Nov 25 09:58:24 crc kubenswrapper[4854]: I1125 09:58:24.268145 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h58qt" event={"ID":"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98","Type":"ContainerStarted","Data":"8b95ed172f66873363ec7441a58615fd7caa65d962123e0877b89533aa2603ba"} Nov 25 09:58:24 crc kubenswrapper[4854]: I1125 09:58:24.285340 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-pfkjn" podStartSLOduration=3.88005842 podStartE2EDuration="34.285315409s" podCreationTimestamp="2025-11-25 09:57:50 +0000 UTC" firstStartedPulling="2025-11-25 09:57:52.484468591 +0000 UTC m=+1278.337461967" lastFinishedPulling="2025-11-25 09:58:22.88972558 +0000 UTC m=+1308.742718956" observedRunningTime="2025-11-25 09:58:24.282325837 +0000 UTC m=+1310.135319213" watchObservedRunningTime="2025-11-25 09:58:24.285315409 +0000 UTC m=+1310.138308785" Nov 25 09:58:24 crc kubenswrapper[4854]: I1125 09:58:24.313392 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-5n9gc" podStartSLOduration=5.3959807810000004 podStartE2EDuration="33.313164796s" podCreationTimestamp="2025-11-25 09:57:51 +0000 UTC" firstStartedPulling="2025-11-25 09:57:53.531456333 +0000 UTC m=+1279.384449709" lastFinishedPulling="2025-11-25 09:58:21.448640348 +0000 UTC m=+1307.301633724" observedRunningTime="2025-11-25 09:58:24.307909842 +0000 UTC m=+1310.160903218" watchObservedRunningTime="2025-11-25 09:58:24.313164796 +0000 UTC m=+1310.166158172" Nov 25 09:58:24 crc kubenswrapper[4854]: I1125 09:58:24.393336 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.077761 4854 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7] : Timed out while waiting for systemd to remove kubepods-besteffort-pod3c746cab_609e_45d1_b9ff_b5b3a9b1d3b7.slice" Nov 25 09:58:25 crc kubenswrapper[4854]: E1125 
09:58:25.078150 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7] : Timed out while waiting for systemd to remove kubepods-besteffort-pod3c746cab_609e_45d1_b9ff_b5b3a9b1d3b7.slice" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" podUID="3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.295962 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hkj5t" event={"ID":"70897159-9d6f-44cc-9b46-5f6a5d18fd8b","Type":"ContainerStarted","Data":"089d2a430f10afef4cfd498ffb2b28f64c94c7dfd45e7109263a3e93173a32a3"} Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.306305 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h58qt" event={"ID":"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98","Type":"ContainerStarted","Data":"10f7ec26259addf257420fc5f9b059d31430876f4caed02a03148e2aa636e30a"} Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.317904 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c67dbcea-a3b9-46ac-833c-97595c61756e","Type":"ContainerStarted","Data":"828a51a9b39919a71b5ae3a8bf1007751e179b303d3e3675ecd94d37882a7715"} Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.319792 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-hkj5t" podStartSLOduration=3.248349419 podStartE2EDuration="34.319775907s" podCreationTimestamp="2025-11-25 09:57:51 +0000 UTC" firstStartedPulling="2025-11-25 09:57:52.998832731 +0000 UTC m=+1278.851826107" lastFinishedPulling="2025-11-25 09:58:24.070259219 +0000 UTC m=+1309.923252595" observedRunningTime="2025-11-25 09:58:25.316783925 +0000 UTC m=+1311.169777321" watchObservedRunningTime="2025-11-25 09:58:25.319775907 +0000 UTC m=+1311.172769283" Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.321228 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerStarted","Data":"0e07de8aeee86e3295322c7fff2756ab9347f33c7f606b93fb11b523a120b651"} Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.324275 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-tsxsg" Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.324273 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9291c61a-5095-4ccb-a6a0-e1e618bfb501","Type":"ContainerStarted","Data":"f1a6cbcb770c9dcf08497157b59b07b98a4ebfc9e1e40169544fc1aaa7aefeb3"} Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.324339 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9291c61a-5095-4ccb-a6a0-e1e618bfb501","Type":"ContainerStarted","Data":"8fb3cb04a044cd35c6dfcba269299d9be3fff9c987b28796f74432362b33851a"} Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.337450 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-h58qt" podStartSLOduration=4.337431163 podStartE2EDuration="4.337431163s" podCreationTimestamp="2025-11-25 09:58:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:25.331927502 +0000 UTC m=+1311.184920898" watchObservedRunningTime="2025-11-25 09:58:25.337431163 +0000 UTC m=+1311.190424539" Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.386223 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-tsxsg"] Nov 25 09:58:25 crc kubenswrapper[4854]: I1125 09:58:25.402850 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-tsxsg"] Nov 25 09:58:26 crc kubenswrapper[4854]: I1125 09:58:26.339037 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c67dbcea-a3b9-46ac-833c-97595c61756e","Type":"ContainerStarted","Data":"ce845be113d7c4914879cee65d8279f1fe6e5641babefbbdaa7470c88928078b"} Nov 25 09:58:26 crc kubenswrapper[4854]: I1125 09:58:26.343537 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9291c61a-5095-4ccb-a6a0-e1e618bfb501","Type":"ContainerStarted","Data":"3925d0ecbcf492ecf430f31674a2f8cf6d1429050e7fd4aff7d6dae030077326"} Nov 25 09:58:26 crc kubenswrapper[4854]: I1125 09:58:26.379074 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.379053218 podStartE2EDuration="4.379053218s" podCreationTimestamp="2025-11-25 09:58:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:26.372992212 +0000 UTC m=+1312.225985588" watchObservedRunningTime="2025-11-25 09:58:26.379053218 +0000 UTC m=+1312.232046594" Nov 25 09:58:26 crc kubenswrapper[4854]: I1125 09:58:26.394542 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.394520414 podStartE2EDuration="5.394520414s" podCreationTimestamp="2025-11-25 09:58:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:26.390100402 +0000 UTC m=+1312.243093798" watchObservedRunningTime="2025-11-25 09:58:26.394520414 +0000 UTC m=+1312.247513790" Nov 25 09:58:27 crc kubenswrapper[4854]: I1125 09:58:27.027461 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7" 
path="/var/lib/kubelet/pods/3c746cab-609e-45d1-b9ff-b5b3a9b1d3b7/volumes" Nov 25 09:58:30 crc kubenswrapper[4854]: I1125 09:58:30.402825 4854 generic.go:334] "Generic (PLEG): container finished" podID="33e7955b-280b-4907-a8ee-7fb1a46d6352" containerID="e644f8b37a3dd8708617101f297f3fb6fe3734660cd125b9a812de9c4e28cf78" exitCode=0 Nov 25 09:58:30 crc kubenswrapper[4854]: I1125 09:58:30.402916 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5n9gc" event={"ID":"33e7955b-280b-4907-a8ee-7fb1a46d6352","Type":"ContainerDied","Data":"e644f8b37a3dd8708617101f297f3fb6fe3734660cd125b9a812de9c4e28cf78"} Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.319682 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c7l4t"] Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.323462 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.354818 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c7l4t"] Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.381523 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-catalog-content\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.381596 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-utilities\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.381795 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdrw7\" (UniqueName: \"kubernetes.io/projected/bb7eee5b-75e6-483b-a68e-1d6e39814690-kube-api-access-qdrw7\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.483275 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-catalog-content\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.483336 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-utilities\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.483400 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdrw7\" (UniqueName: \"kubernetes.io/projected/bb7eee5b-75e6-483b-a68e-1d6e39814690-kube-api-access-qdrw7\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " 
pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.483778 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-catalog-content\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.483998 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-utilities\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.511969 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdrw7\" (UniqueName: \"kubernetes.io/projected/bb7eee5b-75e6-483b-a68e-1d6e39814690-kube-api-access-qdrw7\") pod \"redhat-operators-c7l4t\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.605608 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.605971 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.650049 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.657362 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.667383 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:31 crc kubenswrapper[4854]: I1125 09:58:31.991729 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-5n9gc" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.100846 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33e7955b-280b-4907-a8ee-7fb1a46d6352-logs\") pod \"33e7955b-280b-4907-a8ee-7fb1a46d6352\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.100987 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-config-data\") pod \"33e7955b-280b-4907-a8ee-7fb1a46d6352\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.101115 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-combined-ca-bundle\") pod \"33e7955b-280b-4907-a8ee-7fb1a46d6352\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.101181 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-scripts\") pod \"33e7955b-280b-4907-a8ee-7fb1a46d6352\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.101208 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33e7955b-280b-4907-a8ee-7fb1a46d6352-logs" (OuterVolumeSpecName: "logs") pod "33e7955b-280b-4907-a8ee-7fb1a46d6352" (UID: "33e7955b-280b-4907-a8ee-7fb1a46d6352"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.101246 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nls2f\" (UniqueName: \"kubernetes.io/projected/33e7955b-280b-4907-a8ee-7fb1a46d6352-kube-api-access-nls2f\") pod \"33e7955b-280b-4907-a8ee-7fb1a46d6352\" (UID: \"33e7955b-280b-4907-a8ee-7fb1a46d6352\") " Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.101927 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33e7955b-280b-4907-a8ee-7fb1a46d6352-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.105831 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33e7955b-280b-4907-a8ee-7fb1a46d6352-kube-api-access-nls2f" (OuterVolumeSpecName: "kube-api-access-nls2f") pod "33e7955b-280b-4907-a8ee-7fb1a46d6352" (UID: "33e7955b-280b-4907-a8ee-7fb1a46d6352"). InnerVolumeSpecName "kube-api-access-nls2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.113852 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-scripts" (OuterVolumeSpecName: "scripts") pod "33e7955b-280b-4907-a8ee-7fb1a46d6352" (UID: "33e7955b-280b-4907-a8ee-7fb1a46d6352"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.145031 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-config-data" (OuterVolumeSpecName: "config-data") pod "33e7955b-280b-4907-a8ee-7fb1a46d6352" (UID: "33e7955b-280b-4907-a8ee-7fb1a46d6352"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.166421 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33e7955b-280b-4907-a8ee-7fb1a46d6352" (UID: "33e7955b-280b-4907-a8ee-7fb1a46d6352"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.207954 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.208018 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.208031 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nls2f\" (UniqueName: \"kubernetes.io/projected/33e7955b-280b-4907-a8ee-7fb1a46d6352-kube-api-access-nls2f\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.208042 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e7955b-280b-4907-a8ee-7fb1a46d6352-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.428079 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5n9gc" event={"ID":"33e7955b-280b-4907-a8ee-7fb1a46d6352","Type":"ContainerDied","Data":"55f14d76d4c0b7d195763aa2a8e5d550c9c4d401cf6262cccf1d9f826103ec61"} Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.428327 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55f14d76d4c0b7d195763aa2a8e5d550c9c4d401cf6262cccf1d9f826103ec61" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.428110 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-5n9gc" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.430143 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerStarted","Data":"d8159c14ac650c6b0bf71ec92dd0aea9bbf8a80af16eae92942c0d587ea54711"} Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.431108 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.431155 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.463773 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c7l4t"] Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.545188 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-557b4bfdc4-lt7bk"] Nov 25 09:58:32 crc kubenswrapper[4854]: E1125 09:58:32.545784 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33e7955b-280b-4907-a8ee-7fb1a46d6352" containerName="placement-db-sync" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.545804 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="33e7955b-280b-4907-a8ee-7fb1a46d6352" containerName="placement-db-sync" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.546125 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="33e7955b-280b-4907-a8ee-7fb1a46d6352" containerName="placement-db-sync" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.547729 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.555412 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.555715 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.555783 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-q9wjs" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.555899 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.556017 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.576478 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-557b4bfdc4-lt7bk"] Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.616695 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/893affd4-934c-4901-933d-5b28b78ca519-logs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.616740 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzcsf\" (UniqueName: \"kubernetes.io/projected/893affd4-934c-4901-933d-5b28b78ca519-kube-api-access-zzcsf\") pod 
\"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.616801 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-public-tls-certs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.616975 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-config-data\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.617063 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-scripts\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.617108 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-internal-tls-certs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.617295 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-combined-ca-bundle\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.668967 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.669027 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.719608 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-internal-tls-certs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.719725 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-combined-ca-bundle\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.719803 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/893affd4-934c-4901-933d-5b28b78ca519-logs\") pod \"placement-557b4bfdc4-lt7bk\" 
(UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.719850 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzcsf\" (UniqueName: \"kubernetes.io/projected/893affd4-934c-4901-933d-5b28b78ca519-kube-api-access-zzcsf\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.719913 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-public-tls-certs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.719963 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-config-data\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.720006 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-scripts\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.722467 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/893affd4-934c-4901-933d-5b28b78ca519-logs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.756830 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-public-tls-certs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.760421 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-config-data\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.760768 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-scripts\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.761192 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzcsf\" (UniqueName: \"kubernetes.io/projected/893affd4-934c-4901-933d-5b28b78ca519-kube-api-access-zzcsf\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.769509 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-combined-ca-bundle\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.796344 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/893affd4-934c-4901-933d-5b28b78ca519-internal-tls-certs\") pod \"placement-557b4bfdc4-lt7bk\" (UID: \"893affd4-934c-4901-933d-5b28b78ca519\") " pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.890658 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.893033 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 09:58:32 crc kubenswrapper[4854]: I1125 09:58:32.923736 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.462584 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-557b4bfdc4-lt7bk"] Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.462844 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7l4t" event={"ID":"bb7eee5b-75e6-483b-a68e-1d6e39814690","Type":"ContainerDied","Data":"197a7e26a217fb846260cbf5d78440d2cb98e2bd87d1290fe2588006a595a810"} Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.463406 4854 generic.go:334] "Generic (PLEG): container finished" podID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerID="197a7e26a217fb846260cbf5d78440d2cb98e2bd87d1290fe2588006a595a810" exitCode=0 Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.463599 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7l4t" event={"ID":"bb7eee5b-75e6-483b-a68e-1d6e39814690","Type":"ContainerStarted","Data":"952c31334cacf038348868a85287ab05aa0456ae2e7c34822d47a2fa60ab4a03"} Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.474610 4854 generic.go:334] "Generic (PLEG): container finished" podID="3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" containerID="10f7ec26259addf257420fc5f9b059d31430876f4caed02a03148e2aa636e30a" exitCode=0 Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.478258 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h58qt" event={"ID":"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98","Type":"ContainerDied","Data":"10f7ec26259addf257420fc5f9b059d31430876f4caed02a03148e2aa636e30a"} Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.478370 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 09:58:33 crc kubenswrapper[4854]: I1125 09:58:33.478416 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 09:58:34 crc kubenswrapper[4854]: I1125 09:58:34.492466 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-557b4bfdc4-lt7bk" event={"ID":"893affd4-934c-4901-933d-5b28b78ca519","Type":"ContainerStarted","Data":"4858785959aae5e55c06de64fb55b7cbb719dee6757d45ef5031431203e3f8c2"} Nov 25 09:58:34 crc 
kubenswrapper[4854]: I1125 09:58:34.492747 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-557b4bfdc4-lt7bk" event={"ID":"893affd4-934c-4901-933d-5b28b78ca519","Type":"ContainerStarted","Data":"95f39b6df372ce093464886de9120572ab67291bebd114129062f99e376a6edd"} Nov 25 09:58:34 crc kubenswrapper[4854]: I1125 09:58:34.492758 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-557b4bfdc4-lt7bk" event={"ID":"893affd4-934c-4901-933d-5b28b78ca519","Type":"ContainerStarted","Data":"774f7862acb61fd93f28b7fa0ed3916efab131d809c166249a0fc61dcb233f2b"} Nov 25 09:58:34 crc kubenswrapper[4854]: I1125 09:58:34.518861 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-557b4bfdc4-lt7bk" podStartSLOduration=2.51884137 podStartE2EDuration="2.51884137s" podCreationTimestamp="2025-11-25 09:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:34.518447808 +0000 UTC m=+1320.371441184" watchObservedRunningTime="2025-11-25 09:58:34.51884137 +0000 UTC m=+1320.371834756" Nov 25 09:58:34 crc kubenswrapper[4854]: I1125 09:58:34.959561 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.100542 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-scripts\") pod \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.100602 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-combined-ca-bundle\") pod \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.100704 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-config-data\") pod \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.100826 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4hdx\" (UniqueName: \"kubernetes.io/projected/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-kube-api-access-f4hdx\") pod \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.100865 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-fernet-keys\") pod \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.100927 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-credential-keys\") pod \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\" (UID: \"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98\") " Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.109783 4854 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-scripts" (OuterVolumeSpecName: "scripts") pod "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" (UID: "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.127840 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" (UID: "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.131863 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" (UID: "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.133063 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-kube-api-access-f4hdx" (OuterVolumeSpecName: "kube-api-access-f4hdx") pod "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" (UID: "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98"). InnerVolumeSpecName "kube-api-access-f4hdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.140935 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-config-data" (OuterVolumeSpecName: "config-data") pod "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" (UID: "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.161125 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" (UID: "3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.204756 4854 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.204793 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.204832 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.204844 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.204857 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4hdx\" (UniqueName: \"kubernetes.io/projected/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-kube-api-access-f4hdx\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.204870 4854 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.531694 4854 generic.go:334] "Generic (PLEG): container finished" podID="70897159-9d6f-44cc-9b46-5f6a5d18fd8b" containerID="089d2a430f10afef4cfd498ffb2b28f64c94c7dfd45e7109263a3e93173a32a3" exitCode=0 Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.531751 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hkj5t" event={"ID":"70897159-9d6f-44cc-9b46-5f6a5d18fd8b","Type":"ContainerDied","Data":"089d2a430f10afef4cfd498ffb2b28f64c94c7dfd45e7109263a3e93173a32a3"} Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.536455 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h58qt" event={"ID":"3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98","Type":"ContainerDied","Data":"8b95ed172f66873363ec7441a58615fd7caa65d962123e0877b89533aa2603ba"} Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.536478 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b95ed172f66873363ec7441a58615fd7caa65d962123e0877b89533aa2603ba" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.536510 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-h58qt" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.548745 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7l4t" event={"ID":"bb7eee5b-75e6-483b-a68e-1d6e39814690","Type":"ContainerStarted","Data":"6a039f9dc44fb5dfaaaf7b1ecef5eb7f72b10bb71aad1c7da087a002ba5b9154"} Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.549427 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.549486 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.731640 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6775cd4556-vz69t"] Nov 25 09:58:35 crc kubenswrapper[4854]: E1125 09:58:35.732780 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" containerName="keystone-bootstrap" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.732952 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" containerName="keystone-bootstrap" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.733321 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" containerName="keystone-bootstrap" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.734478 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.737382 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.738119 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.738543 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wks95" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.738805 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.740328 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.741374 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.750320 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6775cd4556-vz69t"] Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841574 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-fernet-keys\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841653 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-credential-keys\") pod \"keystone-6775cd4556-vz69t\" (UID: 
\"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841772 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-public-tls-certs\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841802 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-internal-tls-certs\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841827 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-combined-ca-bundle\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841872 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-859rr\" (UniqueName: \"kubernetes.io/projected/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-kube-api-access-859rr\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841926 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-config-data\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.841961 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-scripts\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945150 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-credential-keys\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945245 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-public-tls-certs\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945280 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-internal-tls-certs\") pod 
\"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945306 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-combined-ca-bundle\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945361 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-859rr\" (UniqueName: \"kubernetes.io/projected/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-kube-api-access-859rr\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945423 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-config-data\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945469 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-scripts\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.945640 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-fernet-keys\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.955719 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-credential-keys\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.958504 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-config-data\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.971041 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-scripts\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.971467 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-combined-ca-bundle\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 
09:58:35.971491 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-fernet-keys\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.971861 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-public-tls-certs\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.971892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-internal-tls-certs\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:35 crc kubenswrapper[4854]: I1125 09:58:35.978213 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-859rr\" (UniqueName: \"kubernetes.io/projected/c6297e85-6f7a-479d-9109-1e0f5c5a8cb8-kube-api-access-859rr\") pod \"keystone-6775cd4556-vz69t\" (UID: \"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8\") " pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:36 crc kubenswrapper[4854]: I1125 09:58:36.081879 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:36 crc kubenswrapper[4854]: I1125 09:58:36.715617 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6775cd4556-vz69t"] Nov 25 09:58:36 crc kubenswrapper[4854]: W1125 09:58:36.725399 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6297e85_6f7a_479d_9109_1e0f5c5a8cb8.slice/crio-cdaaae94c02a955b2c6c78828c4a7b12ef13faac1f8b7e628751d81dc68bfe10 WatchSource:0}: Error finding container cdaaae94c02a955b2c6c78828c4a7b12ef13faac1f8b7e628751d81dc68bfe10: Status 404 returned error can't find the container with id cdaaae94c02a955b2c6c78828c4a7b12ef13faac1f8b7e628751d81dc68bfe10 Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.055066 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.191507 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x25l4\" (UniqueName: \"kubernetes.io/projected/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-kube-api-access-x25l4\") pod \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.191685 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-combined-ca-bundle\") pod \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.191819 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-db-sync-config-data\") pod \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\" (UID: \"70897159-9d6f-44cc-9b46-5f6a5d18fd8b\") " Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.199181 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-kube-api-access-x25l4" (OuterVolumeSpecName: "kube-api-access-x25l4") pod "70897159-9d6f-44cc-9b46-5f6a5d18fd8b" (UID: "70897159-9d6f-44cc-9b46-5f6a5d18fd8b"). InnerVolumeSpecName "kube-api-access-x25l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.199584 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "70897159-9d6f-44cc-9b46-5f6a5d18fd8b" (UID: "70897159-9d6f-44cc-9b46-5f6a5d18fd8b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.202469 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x25l4\" (UniqueName: \"kubernetes.io/projected/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-kube-api-access-x25l4\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.202506 4854 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.244367 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "70897159-9d6f-44cc-9b46-5f6a5d18fd8b" (UID: "70897159-9d6f-44cc-9b46-5f6a5d18fd8b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.305382 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70897159-9d6f-44cc-9b46-5f6a5d18fd8b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.601765 4854 generic.go:334] "Generic (PLEG): container finished" podID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerID="6a039f9dc44fb5dfaaaf7b1ecef5eb7f72b10bb71aad1c7da087a002ba5b9154" exitCode=0 Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.601848 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7l4t" event={"ID":"bb7eee5b-75e6-483b-a68e-1d6e39814690","Type":"ContainerDied","Data":"6a039f9dc44fb5dfaaaf7b1ecef5eb7f72b10bb71aad1c7da087a002ba5b9154"} Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.607267 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hkj5t" event={"ID":"70897159-9d6f-44cc-9b46-5f6a5d18fd8b","Type":"ContainerDied","Data":"78193695004fd85e472cf058eed7bc609224e1b5671d71c1309243dd4910f2aa"} Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.607558 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78193695004fd85e472cf058eed7bc609224e1b5671d71c1309243dd4910f2aa" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.607289 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hkj5t" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.619989 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6775cd4556-vz69t" event={"ID":"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8","Type":"ContainerStarted","Data":"ad5eda2aca334c2c558e75431a346862b1bdc2511de0da640634d2e42d3bbe84"} Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.620048 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6775cd4556-vz69t" event={"ID":"c6297e85-6f7a-479d-9109-1e0f5c5a8cb8","Type":"ContainerStarted","Data":"cdaaae94c02a955b2c6c78828c4a7b12ef13faac1f8b7e628751d81dc68bfe10"} Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.771893 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-586fb5bfc9-zcsm7"] Nov 25 09:58:37 crc kubenswrapper[4854]: E1125 09:58:37.772697 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70897159-9d6f-44cc-9b46-5f6a5d18fd8b" containerName="barbican-db-sync" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.772713 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="70897159-9d6f-44cc-9b46-5f6a5d18fd8b" containerName="barbican-db-sync" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.773121 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="70897159-9d6f-44cc-9b46-5f6a5d18fd8b" containerName="barbican-db-sync" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.774781 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.787824 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-v8f9c" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.788078 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.787827 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.799124 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-84d58997f8-bpzfk"] Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.817368 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.829194 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.873503 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-586fb5bfc9-zcsm7"] Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.902803 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-84d58997f8-bpzfk"] Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.919838 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-config-data\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.919906 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-config-data-custom\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.919954 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d226a10-004d-4b8c-8282-4f7955c8d41f-logs\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.919975 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd27c53c-e9d6-40de-9d4c-fea018061c07-logs\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.919998 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-combined-ca-bundle\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: 
\"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.920025 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-combined-ca-bundle\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.920046 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-config-data-custom\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.920135 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-config-data\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.920153 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p99rf\" (UniqueName: \"kubernetes.io/projected/fd27c53c-e9d6-40de-9d4c-fea018061c07-kube-api-access-p99rf\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.920201 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q2gx\" (UniqueName: \"kubernetes.io/projected/3d226a10-004d-4b8c-8282-4f7955c8d41f-kube-api-access-4q2gx\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.963823 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-9zxbd"] Nov 25 09:58:37 crc kubenswrapper[4854]: I1125 09:58:37.965745 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.018756 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-9zxbd"] Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.028948 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-config-data\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.028991 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p99rf\" (UniqueName: \"kubernetes.io/projected/fd27c53c-e9d6-40de-9d4c-fea018061c07-kube-api-access-p99rf\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.029062 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q2gx\" (UniqueName: \"kubernetes.io/projected/3d226a10-004d-4b8c-8282-4f7955c8d41f-kube-api-access-4q2gx\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.029140 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-config-data\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.029163 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-config-data-custom\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.029199 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d226a10-004d-4b8c-8282-4f7955c8d41f-logs\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.029213 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd27c53c-e9d6-40de-9d4c-fea018061c07-logs\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.029233 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-combined-ca-bundle\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 
09:58:38.029254 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-combined-ca-bundle\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.029276 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-config-data-custom\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.031838 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d226a10-004d-4b8c-8282-4f7955c8d41f-logs\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.032331 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd27c53c-e9d6-40de-9d4c-fea018061c07-logs\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.034729 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-config-data-custom\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.048415 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-config-data-custom\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.059753 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-config-data\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.061489 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-config-data\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.063918 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d226a10-004d-4b8c-8282-4f7955c8d41f-combined-ca-bundle\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc 
kubenswrapper[4854]: I1125 09:58:38.066906 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd27c53c-e9d6-40de-9d4c-fea018061c07-combined-ca-bundle\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.069373 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6ccd654df4-nc9k5"] Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.071380 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.073413 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q2gx\" (UniqueName: \"kubernetes.io/projected/3d226a10-004d-4b8c-8282-4f7955c8d41f-kube-api-access-4q2gx\") pod \"barbican-keystone-listener-84d58997f8-bpzfk\" (UID: \"3d226a10-004d-4b8c-8282-4f7955c8d41f\") " pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.074989 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.102284 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6ccd654df4-nc9k5"] Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.103261 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p99rf\" (UniqueName: \"kubernetes.io/projected/fd27c53c-e9d6-40de-9d4c-fea018061c07-kube-api-access-p99rf\") pod \"barbican-worker-586fb5bfc9-zcsm7\" (UID: \"fd27c53c-e9d6-40de-9d4c-fea018061c07\") " pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.131815 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-config\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.131886 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.131999 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.133441 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 
09:58:38.133601 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.133691 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5h6j\" (UniqueName: \"kubernetes.io/projected/b33c7d9f-3d87-473f-b442-faeffed65c62-kube-api-access-g5h6j\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.145736 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-586fb5bfc9-zcsm7" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.164939 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236294 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236365 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data-custom\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236400 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5h6j\" (UniqueName: \"kubernetes.io/projected/b33c7d9f-3d87-473f-b442-faeffed65c62-kube-api-access-g5h6j\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236480 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-config\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236518 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236576 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k2v5\" (UniqueName: \"kubernetes.io/projected/c0c3d44b-bac5-40fd-9edd-b6ee78913589-kube-api-access-8k2v5\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " 
pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236626 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236662 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-combined-ca-bundle\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236766 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236787 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0c3d44b-bac5-40fd-9edd-b6ee78913589-logs\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.236831 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.237724 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.239445 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.239587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-config\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.240030 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: 
I1125 09:58:38.240142 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.258662 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5h6j\" (UniqueName: \"kubernetes.io/projected/b33c7d9f-3d87-473f-b442-faeffed65c62-kube-api-access-g5h6j\") pod \"dnsmasq-dns-7c67bffd47-9zxbd\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") " pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.327998 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.338509 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k2v5\" (UniqueName: \"kubernetes.io/projected/c0c3d44b-bac5-40fd-9edd-b6ee78913589-kube-api-access-8k2v5\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.338581 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-combined-ca-bundle\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.338696 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.338725 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0c3d44b-bac5-40fd-9edd-b6ee78913589-logs\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.338828 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data-custom\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.340293 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0c3d44b-bac5-40fd-9edd-b6ee78913589-logs\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.347301 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data-custom\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: 
\"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.349499 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-combined-ca-bundle\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.352885 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.400008 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k2v5\" (UniqueName: \"kubernetes.io/projected/c0c3d44b-bac5-40fd-9edd-b6ee78913589-kube-api-access-8k2v5\") pod \"barbican-api-6ccd654df4-nc9k5\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.644902 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.646148 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.692097 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6775cd4556-vz69t" podStartSLOduration=3.692070184 podStartE2EDuration="3.692070184s" podCreationTimestamp="2025-11-25 09:58:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:38.683421886 +0000 UTC m=+1324.536415262" watchObservedRunningTime="2025-11-25 09:58:38.692070184 +0000 UTC m=+1324.545063560" Nov 25 09:58:38 crc kubenswrapper[4854]: I1125 09:58:38.987332 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-84d58997f8-bpzfk"] Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.042627 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-586fb5bfc9-zcsm7"] Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.137271 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-9zxbd"] Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.292460 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6ccd654df4-nc9k5"] Nov 25 09:58:39 crc kubenswrapper[4854]: W1125 09:58:39.309028 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0c3d44b_bac5_40fd_9edd_b6ee78913589.slice/crio-c4de81c7bbe2759f55db0e2224f5e8b8e0c778da56598b576569b0d341f70483 WatchSource:0}: Error finding container c4de81c7bbe2759f55db0e2224f5e8b8e0c778da56598b576569b0d341f70483: Status 404 returned error can't find the container with id c4de81c7bbe2759f55db0e2224f5e8b8e0c778da56598b576569b0d341f70483 Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.653399 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-worker-586fb5bfc9-zcsm7" event={"ID":"fd27c53c-e9d6-40de-9d4c-fea018061c07","Type":"ContainerStarted","Data":"377cb850ecc009e007b754709663be044eb23202cca0629b4437b7abaebd9b7e"} Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.656467 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" event={"ID":"3d226a10-004d-4b8c-8282-4f7955c8d41f","Type":"ContainerStarted","Data":"cc58bbfc52d861e035d7bf32cd8966cddba568708d9e891b9b1c69bd543f288d"} Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.658773 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" event={"ID":"b33c7d9f-3d87-473f-b442-faeffed65c62","Type":"ContainerStarted","Data":"3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4"} Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.658817 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" event={"ID":"b33c7d9f-3d87-473f-b442-faeffed65c62","Type":"ContainerStarted","Data":"e6b6e2fc018254447bdd1dd9fa337549565765336f2df74336d6f7ff5bc7dbf9"} Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.663109 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ccd654df4-nc9k5" event={"ID":"c0c3d44b-bac5-40fd-9edd-b6ee78913589","Type":"ContainerStarted","Data":"4c84c9a8a19f148b87f3870f4fa29699562bf00f45ca8675e43ba44ff1b39041"} Nov 25 09:58:39 crc kubenswrapper[4854]: I1125 09:58:39.663172 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ccd654df4-nc9k5" event={"ID":"c0c3d44b-bac5-40fd-9edd-b6ee78913589","Type":"ContainerStarted","Data":"c4de81c7bbe2759f55db0e2224f5e8b8e0c778da56598b576569b0d341f70483"} Nov 25 09:58:40 crc kubenswrapper[4854]: I1125 09:58:40.684296 4854 generic.go:334] "Generic (PLEG): container finished" podID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerID="3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4" exitCode=0 Nov 25 09:58:40 crc kubenswrapper[4854]: I1125 09:58:40.684376 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" event={"ID":"b33c7d9f-3d87-473f-b442-faeffed65c62","Type":"ContainerDied","Data":"3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4"} Nov 25 09:58:40 crc kubenswrapper[4854]: I1125 09:58:40.697610 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ccd654df4-nc9k5" event={"ID":"c0c3d44b-bac5-40fd-9edd-b6ee78913589","Type":"ContainerStarted","Data":"e1cb8af2ade8375c3a0b844d1c55892e9de4a7ece0369d8de7777968d354e756"} Nov 25 09:58:40 crc kubenswrapper[4854]: I1125 09:58:40.698546 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:40 crc kubenswrapper[4854]: I1125 09:58:40.698707 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:58:40 crc kubenswrapper[4854]: I1125 09:58:40.750105 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6ccd654df4-nc9k5" podStartSLOduration=3.75008303 podStartE2EDuration="3.75008303s" podCreationTimestamp="2025-11-25 09:58:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:40.729634947 +0000 UTC m=+1326.582628333" watchObservedRunningTime="2025-11-25 
09:58:40.75008303 +0000 UTC m=+1326.603076416"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.434884 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-dd6ff4df6-gfkgf"]
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.437184 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.441282 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.441634 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.452535 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-dd6ff4df6-gfkgf"]
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.563092 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-public-tls-certs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.563163 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/941bda47-168e-496a-b60f-c4edb4560bcc-logs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.563199 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-config-data-custom\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.563498 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-combined-ca-bundle\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.563580 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-config-data\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.563637 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r7q6\" (UniqueName: \"kubernetes.io/projected/941bda47-168e-496a-b60f-c4edb4560bcc-kube-api-access-9r7q6\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.563845 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-internal-tls-certs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.666017 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-public-tls-certs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.666116 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/941bda47-168e-496a-b60f-c4edb4560bcc-logs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.666180 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-config-data-custom\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.666341 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-combined-ca-bundle\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.666366 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-config-data\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.666398 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r7q6\" (UniqueName: \"kubernetes.io/projected/941bda47-168e-496a-b60f-c4edb4560bcc-kube-api-access-9r7q6\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.666489 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-internal-tls-certs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.681880 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-config-data\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.682259 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/941bda47-168e-496a-b60f-c4edb4560bcc-logs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.696341 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-combined-ca-bundle\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.698876 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-public-tls-certs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.704461 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-config-data-custom\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.710864 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/941bda47-168e-496a-b60f-c4edb4560bcc-internal-tls-certs\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.753160 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r7q6\" (UniqueName: \"kubernetes.io/projected/941bda47-168e-496a-b60f-c4edb4560bcc-kube-api-access-9r7q6\") pod \"barbican-api-dd6ff4df6-gfkgf\" (UID: \"941bda47-168e-496a-b60f-c4edb4560bcc\") " pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:41 crc kubenswrapper[4854]: I1125 09:58:41.779315 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:43 crc kubenswrapper[4854]: I1125 09:58:43.687010 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 09:58:43 crc kubenswrapper[4854]: I1125 09:58:43.687621 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 25 09:58:43 crc kubenswrapper[4854]: I1125 09:58:43.687759 4854 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 09:58:43 crc kubenswrapper[4854]: I1125 09:58:43.687813 4854 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 09:58:43 crc kubenswrapper[4854]: I1125 09:58:43.688407 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 09:58:43 crc kubenswrapper[4854]: I1125 09:58:43.702207 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 25 09:58:44 crc kubenswrapper[4854]: I1125 09:58:44.801301 4854 generic.go:334] "Generic (PLEG): container finished" podID="ec8d5beb-439a-4921-a6b8-029331402149" containerID="1228aff76ce4e075c53c6f5d77ee2da25523328069aa7ab95cf151dc04c7c6f7" exitCode=0
Nov 25 09:58:44 crc kubenswrapper[4854]: I1125 09:58:44.801493 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pfkjn" event={"ID":"ec8d5beb-439a-4921-a6b8-029331402149","Type":"ContainerDied","Data":"1228aff76ce4e075c53c6f5d77ee2da25523328069aa7ab95cf151dc04c7c6f7"}
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.641202 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-pfkjn"
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.787005 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfd67\" (UniqueName: \"kubernetes.io/projected/ec8d5beb-439a-4921-a6b8-029331402149-kube-api-access-mfd67\") pod \"ec8d5beb-439a-4921-a6b8-029331402149\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") "
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.787223 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-config-data\") pod \"ec8d5beb-439a-4921-a6b8-029331402149\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") "
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.787408 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-combined-ca-bundle\") pod \"ec8d5beb-439a-4921-a6b8-029331402149\" (UID: \"ec8d5beb-439a-4921-a6b8-029331402149\") "
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.841092 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec8d5beb-439a-4921-a6b8-029331402149-kube-api-access-mfd67" (OuterVolumeSpecName: "kube-api-access-mfd67") pod "ec8d5beb-439a-4921-a6b8-029331402149" (UID: "ec8d5beb-439a-4921-a6b8-029331402149"). InnerVolumeSpecName "kube-api-access-mfd67". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.871264 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pfkjn" event={"ID":"ec8d5beb-439a-4921-a6b8-029331402149","Type":"ContainerDied","Data":"eefa3086f4d38fdb1c72755e17b4ef8c15f93dfaa5e3f6662950163c7d8bf538"}
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.871306 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eefa3086f4d38fdb1c72755e17b4ef8c15f93dfaa5e3f6662950163c7d8bf538"
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.871375 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-pfkjn"
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.886282 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec8d5beb-439a-4921-a6b8-029331402149" (UID: "ec8d5beb-439a-4921-a6b8-029331402149"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.894696 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.894733 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfd67\" (UniqueName: \"kubernetes.io/projected/ec8d5beb-439a-4921-a6b8-029331402149-kube-api-access-mfd67\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.978830 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-config-data" (OuterVolumeSpecName: "config-data") pod "ec8d5beb-439a-4921-a6b8-029331402149" (UID: "ec8d5beb-439a-4921-a6b8-029331402149"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:46 crc kubenswrapper[4854]: I1125 09:58:46.997348 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec8d5beb-439a-4921-a6b8-029331402149-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:47 crc kubenswrapper[4854]: I1125 09:58:47.518417 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-6ccd654df4-nc9k5" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:58:48 crc kubenswrapper[4854]: I1125 09:58:48.594413 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-dd6ff4df6-gfkgf"]
Nov 25 09:58:49 crc kubenswrapper[4854]: I1125 09:58:49.921412 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7l4t" event={"ID":"bb7eee5b-75e6-483b-a68e-1d6e39814690","Type":"ContainerStarted","Data":"1bc02835fc8d80cd84ee5a8928bc6761510a9f7f44c3ab5538d8a17a5662378b"}
Nov 25 09:58:49 crc kubenswrapper[4854]: I1125 09:58:49.925155 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-dd6ff4df6-gfkgf" event={"ID":"941bda47-168e-496a-b60f-c4edb4560bcc","Type":"ContainerStarted","Data":"117291896ba22f6d478f5450feab5eee3037352b1f44618f284a6c9818d8753f"}
Nov 25 09:58:49 crc kubenswrapper[4854]: I1125 09:58:49.927845 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" event={"ID":"b33c7d9f-3d87-473f-b442-faeffed65c62","Type":"ContainerStarted","Data":"c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9"}
Nov 25 09:58:49 crc kubenswrapper[4854]: I1125 09:58:49.929075 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd"
Nov 25 09:58:49 crc kubenswrapper[4854]: I1125 09:58:49.960762 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c7l4t" podStartSLOduration=7.574610901 podStartE2EDuration="18.960735651s" podCreationTimestamp="2025-11-25 09:58:31 +0000 UTC" firstStartedPulling="2025-11-25 09:58:33.473102941 +0000 UTC m=+1319.326096317" lastFinishedPulling="2025-11-25 09:58:44.859227681 +0000 UTC m=+1330.712221067" observedRunningTime="2025-11-25 09:58:49.947160937 +0000 UTC m=+1335.800154313" watchObservedRunningTime="2025-11-25 09:58:49.960735651 +0000 UTC m=+1335.813729027"
Nov 25 09:58:49 crc kubenswrapper[4854]: I1125 09:58:49.974932 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" podStartSLOduration=12.974915061 podStartE2EDuration="12.974915061s" podCreationTimestamp="2025-11-25 09:58:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:49.967780634 +0000 UTC m=+1335.820774010" watchObservedRunningTime="2025-11-25 09:58:49.974915061 +0000 UTC m=+1335.827908437"
Nov 25 09:58:50 crc kubenswrapper[4854]: E1125 09:58:50.118146 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.941112 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerStarted","Data":"d096aad0172b8cdda6e662ead6338a5c700f45c6dc4bbca4ab045d1b516e005f"}
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.941242 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="ceilometer-notification-agent" containerID="cri-o://0e07de8aeee86e3295322c7fff2756ab9347f33c7f606b93fb11b523a120b651" gracePeriod=30
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.941266 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="proxy-httpd" containerID="cri-o://d096aad0172b8cdda6e662ead6338a5c700f45c6dc4bbca4ab045d1b516e005f" gracePeriod=30
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.941305 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="sg-core" containerID="cri-o://d8159c14ac650c6b0bf71ec92dd0aea9bbf8a80af16eae92942c0d587ea54711" gracePeriod=30
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.941596 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.948687 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-586fb5bfc9-zcsm7" event={"ID":"fd27c53c-e9d6-40de-9d4c-fea018061c07","Type":"ContainerStarted","Data":"fe67038ce3909a0e2c17449d440a6d057c16eb996c3751c544cd2c2c2576ab7a"}
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.948949 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-586fb5bfc9-zcsm7" event={"ID":"fd27c53c-e9d6-40de-9d4c-fea018061c07","Type":"ContainerStarted","Data":"a8c2b5a761a055179e1ee598035077581480f914b604a0662930beb7c875ba6c"}
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.957309 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p594s" event={"ID":"4f828059-1092-45cd-99a8-3915b6bab37f","Type":"ContainerStarted","Data":"ed75b623deadff67db66cc75a930a45b815889596ca105d7959096e1f197f93d"}
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.960592 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" event={"ID":"3d226a10-004d-4b8c-8282-4f7955c8d41f","Type":"ContainerStarted","Data":"c9bb7383fd54bc37f1c07310be61dc2d27e76bdc8dbf9d0cf8d43875c742020f"}
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.960821 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" event={"ID":"3d226a10-004d-4b8c-8282-4f7955c8d41f","Type":"ContainerStarted","Data":"06d0f1e4585d9132f977fbeb20a43ae71ddd9e8360ee8cd216ff312d2525b405"}
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.962912 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-dd6ff4df6-gfkgf" event={"ID":"941bda47-168e-496a-b60f-c4edb4560bcc","Type":"ContainerStarted","Data":"13e49ecc0f4d2313a8bc9b43765cdc50e4474735fd11787795c8740827653954"}
Nov 25 09:58:50 crc kubenswrapper[4854]: I1125 09:58:50.962983 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-dd6ff4df6-gfkgf" event={"ID":"941bda47-168e-496a-b60f-c4edb4560bcc","Type":"ContainerStarted","Data":"4b7e87d9092f3c46624a19a96e619a071440c637644649a5de84d76a9a5f25bd"}
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.014114 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-84d58997f8-bpzfk" podStartSLOduration=3.240619305 podStartE2EDuration="14.014095359s" podCreationTimestamp="2025-11-25 09:58:37 +0000 UTC" firstStartedPulling="2025-11-25 09:58:39.0086596 +0000 UTC m=+1324.861652976" lastFinishedPulling="2025-11-25 09:58:49.782135654 +0000 UTC m=+1335.635129030" observedRunningTime="2025-11-25 09:58:50.993157322 +0000 UTC m=+1336.846150698" watchObservedRunningTime="2025-11-25 09:58:51.014095359 +0000 UTC m=+1336.867088735"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.051238 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-586fb5bfc9-zcsm7" podStartSLOduration=3.592651725 podStartE2EDuration="14.051217651s" podCreationTimestamp="2025-11-25 09:58:37 +0000 UTC" firstStartedPulling="2025-11-25 09:58:39.040032653 +0000 UTC m=+1324.893026029" lastFinishedPulling="2025-11-25 09:58:49.498598579 +0000 UTC m=+1335.351591955" observedRunningTime="2025-11-25 09:58:51.013748269 +0000 UTC m=+1336.866741655" watchObservedRunningTime="2025-11-25 09:58:51.051217651 +0000 UTC m=+1336.904211027"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.099998 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-p594s" podStartSLOduration=4.511659438 podStartE2EDuration="1m1.099950832s" podCreationTimestamp="2025-11-25 09:57:50 +0000 UTC" firstStartedPulling="2025-11-25 09:57:52.911721433 +0000 UTC m=+1278.764714809" lastFinishedPulling="2025-11-25 09:58:49.500012827 +0000 UTC m=+1335.353006203" observedRunningTime="2025-11-25 09:58:51.046222323 +0000 UTC m=+1336.899215699" watchObservedRunningTime="2025-11-25 09:58:51.099950832 +0000 UTC m=+1336.952944208"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.124038 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6ccd654df4-nc9k5"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.124251 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-dd6ff4df6-gfkgf" podStartSLOduration=10.124184879 podStartE2EDuration="10.124184879s" podCreationTimestamp="2025-11-25 09:58:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:51.083560911 +0000 UTC m=+1336.936554297" watchObservedRunningTime="2025-11-25 09:58:51.124184879 +0000 UTC m=+1336.977178255"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.658262 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c7l4t"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.659876 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c7l4t"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.662732 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6ccd654df4-nc9k5"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.784861 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:51 crc kubenswrapper[4854]: I1125 09:58:51.784917 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-dd6ff4df6-gfkgf"
Nov 25 09:58:52 crc kubenswrapper[4854]: I1125 09:58:52.002627 4854 generic.go:334] "Generic (PLEG): container finished" podID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerID="d096aad0172b8cdda6e662ead6338a5c700f45c6dc4bbca4ab045d1b516e005f" exitCode=0
Nov 25 09:58:52 crc kubenswrapper[4854]: I1125 09:58:52.002660 4854 generic.go:334] "Generic (PLEG): container finished" podID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerID="d8159c14ac650c6b0bf71ec92dd0aea9bbf8a80af16eae92942c0d587ea54711" exitCode=2
Nov 25 09:58:52 crc kubenswrapper[4854]: I1125 09:58:52.003820 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerDied","Data":"d096aad0172b8cdda6e662ead6338a5c700f45c6dc4bbca4ab045d1b516e005f"}
Nov 25 09:58:52 crc kubenswrapper[4854]: I1125 09:58:52.003847 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerDied","Data":"d8159c14ac650c6b0bf71ec92dd0aea9bbf8a80af16eae92942c0d587ea54711"}
Nov 25 09:58:52 crc kubenswrapper[4854]: I1125 09:58:52.747572 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c7l4t" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" probeResult="failure" output=<
Nov 25 09:58:52 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 09:58:52 crc kubenswrapper[4854]: >
Nov 25 09:58:53 crc kubenswrapper[4854]: I1125 09:58:53.023553 4854 generic.go:334] "Generic (PLEG): container finished" podID="816b6c7b-9d88-412e-8e20-5630cc8fd4a9" containerID="4c74dced21ec183c10db63707e93a19cdc6f123ef3543bba8f2236f96fd253c2" exitCode=0
Nov 25 09:58:53 crc kubenswrapper[4854]: I1125 09:58:53.026798 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5h66s" event={"ID":"816b6c7b-9d88-412e-8e20-5630cc8fd4a9","Type":"ContainerDied","Data":"4c74dced21ec183c10db63707e93a19cdc6f123ef3543bba8f2236f96fd253c2"}
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.610049 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5h66s"
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.722264 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-config\") pod \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") "
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.722562 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-combined-ca-bundle\") pod \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") "
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.722617 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp84h\" (UniqueName: \"kubernetes.io/projected/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-kube-api-access-rp84h\") pod \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\" (UID: \"816b6c7b-9d88-412e-8e20-5630cc8fd4a9\") "
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.732902 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-kube-api-access-rp84h" (OuterVolumeSpecName: "kube-api-access-rp84h") pod "816b6c7b-9d88-412e-8e20-5630cc8fd4a9" (UID: "816b6c7b-9d88-412e-8e20-5630cc8fd4a9"). InnerVolumeSpecName "kube-api-access-rp84h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.757816 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-config" (OuterVolumeSpecName: "config") pod "816b6c7b-9d88-412e-8e20-5630cc8fd4a9" (UID: "816b6c7b-9d88-412e-8e20-5630cc8fd4a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.778440 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "816b6c7b-9d88-412e-8e20-5630cc8fd4a9" (UID: "816b6c7b-9d88-412e-8e20-5630cc8fd4a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.825112 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.825148 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp84h\" (UniqueName: \"kubernetes.io/projected/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-kube-api-access-rp84h\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:54 crc kubenswrapper[4854]: I1125 09:58:54.825158 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/816b6c7b-9d88-412e-8e20-5630cc8fd4a9-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.029778 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.029841 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.056235 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5h66s"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.056335 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5h66s" event={"ID":"816b6c7b-9d88-412e-8e20-5630cc8fd4a9","Type":"ContainerDied","Data":"72bcdd2ad4b7aad7546d1ea3fecc2825bda956c47cfb3fdbd679da672fc8a442"}
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.056391 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72bcdd2ad4b7aad7546d1ea3fecc2825bda956c47cfb3fdbd679da672fc8a442"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.075607 4854 generic.go:334] "Generic (PLEG): container finished" podID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerID="0e07de8aeee86e3295322c7fff2756ab9347f33c7f606b93fb11b523a120b651" exitCode=0
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.075661 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerDied","Data":"0e07de8aeee86e3295322c7fff2756ab9347f33c7f606b93fb11b523a120b651"}
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.186841 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.235628 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-9zxbd"]
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.235874 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" podUID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerName="dnsmasq-dns" containerID="cri-o://c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9" gracePeriod=10
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.245894 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.308449 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-l67tz"]
Nov 25 09:58:55 crc kubenswrapper[4854]: E1125 09:58:55.308988 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="proxy-httpd"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309011 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="proxy-httpd"
Nov 25 09:58:55 crc kubenswrapper[4854]: E1125 09:58:55.309048 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="816b6c7b-9d88-412e-8e20-5630cc8fd4a9" containerName="neutron-db-sync"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309058 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="816b6c7b-9d88-412e-8e20-5630cc8fd4a9" containerName="neutron-db-sync"
Nov 25 09:58:55 crc kubenswrapper[4854]: E1125 09:58:55.309079 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="ceilometer-notification-agent"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309086 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="ceilometer-notification-agent"
Nov 25 09:58:55 crc kubenswrapper[4854]: E1125 09:58:55.309109 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="sg-core"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309115 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="sg-core"
Nov 25 09:58:55 crc kubenswrapper[4854]: E1125 09:58:55.309127 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec8d5beb-439a-4921-a6b8-029331402149" containerName="heat-db-sync"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309133 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec8d5beb-439a-4921-a6b8-029331402149" containerName="heat-db-sync"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309342 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="816b6c7b-9d88-412e-8e20-5630cc8fd4a9" containerName="neutron-db-sync"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309356 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec8d5beb-439a-4921-a6b8-029331402149" containerName="heat-db-sync"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309372 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="sg-core"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309385 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="ceilometer-notification-agent"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.309403 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" containerName="proxy-httpd"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.310769 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.326078 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-84d577c77b-wtjcw"]
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.327939 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.331714 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-z7f8l"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.331828 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.331919 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.332004 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.337863 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-scripts\") pod \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") "
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.337988 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-log-httpd\") pod \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") "
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.338037 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-config-data\") pod \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") "
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.338058 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lflqq\" (UniqueName: \"kubernetes.io/projected/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-kube-api-access-lflqq\") pod \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") "
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.338094 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-sg-core-conf-yaml\") pod \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") "
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.338123 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-run-httpd\") pod \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") "
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.338340 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-combined-ca-bundle\") pod \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\" (UID: \"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d\") "
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.338467 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" (UID: "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.338831 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.342942 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" (UID: "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.353346 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-kube-api-access-lflqq" (OuterVolumeSpecName: "kube-api-access-lflqq") pod "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" (UID: "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"). InnerVolumeSpecName "kube-api-access-lflqq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.353492 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-scripts" (OuterVolumeSpecName: "scripts") pod "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" (UID: "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.360954 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-84d577c77b-wtjcw"]
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.388169 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-l67tz"]
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.416648 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" (UID: "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.440872 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-config\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441016 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441112 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbsdx\" (UniqueName: \"kubernetes.io/projected/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-kube-api-access-zbsdx\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441194 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zq6j\" (UniqueName: \"kubernetes.io/projected/5ed971a6-e96d-409b-af33-1016e52a207f-kube-api-access-9zq6j\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441263 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441313 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441342 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-httpd-config\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441426 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-config\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.441551 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-ovndb-tls-certs\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.443334 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.443451 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-combined-ca-bundle\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.443507 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.443538 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lflqq\" (UniqueName: \"kubernetes.io/projected/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-kube-api-access-lflqq\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.443548 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.443557 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.442019 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" (UID: "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.472918 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-config-data" (OuterVolumeSpecName: "config-data") pod "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" (UID: "c7153c0a-3527-49ba-a5f2-0b6f5b1b219d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545030 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-config\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545118 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-ovndb-tls-certs\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545170 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545206 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-combined-ca-bundle\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545264 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-config\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545330 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545369 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbsdx\" (UniqueName: \"kubernetes.io/projected/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-kube-api-access-zbsdx\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545392 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zq6j\" (UniqueName: \"kubernetes.io/projected/5ed971a6-e96d-409b-af33-1016e52a207f-kube-api-access-9zq6j\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545414 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545441 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545460 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-httpd-config\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545533 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.545545 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.546317 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-config\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.546693 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.547446 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.548079 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.548131 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.554023 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-combined-ca-bundle\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.555476 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-config\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.556134 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-httpd-config\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.558622 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-ovndb-tls-certs\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.571228 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zq6j\" (UniqueName: \"kubernetes.io/projected/5ed971a6-e96d-409b-af33-1016e52a207f-kube-api-access-9zq6j\") pod \"dnsmasq-dns-848cf88cfc-l67tz\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.573820 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbsdx\" (UniqueName: \"kubernetes.io/projected/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-kube-api-access-zbsdx\") pod \"neutron-84d577c77b-wtjcw\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") " pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.642917 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz"
Nov 25 09:58:55 crc kubenswrapper[4854]: I1125 09:58:55.689784 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.084161 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.102115 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.102462 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c7153c0a-3527-49ba-a5f2-0b6f5b1b219d","Type":"ContainerDied","Data":"3240676e5231a534e34674f7c695edeaa160de2a286121f336f26ab8756b59d3"}
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.102521 4854 scope.go:117] "RemoveContainer" containerID="d096aad0172b8cdda6e662ead6338a5c700f45c6dc4bbca4ab045d1b516e005f"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.111372 4854 generic.go:334] "Generic (PLEG): container finished" podID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerID="c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9" exitCode=0
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.111420 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" event={"ID":"b33c7d9f-3d87-473f-b442-faeffed65c62","Type":"ContainerDied","Data":"c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9"}
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.111452 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd" event={"ID":"b33c7d9f-3d87-473f-b442-faeffed65c62","Type":"ContainerDied","Data":"e6b6e2fc018254447bdd1dd9fa337549565765336f2df74336d6f7ff5bc7dbf9"}
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.111522 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-9zxbd"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.167722 4854 scope.go:117] "RemoveContainer" containerID="d8159c14ac650c6b0bf71ec92dd0aea9bbf8a80af16eae92942c0d587ea54711"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.184896 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-sb\") pod \"b33c7d9f-3d87-473f-b442-faeffed65c62\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") "
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.185002 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-swift-storage-0\") pod \"b33c7d9f-3d87-473f-b442-faeffed65c62\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") "
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.185045 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-nb\") pod \"b33c7d9f-3d87-473f-b442-faeffed65c62\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") "
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.185269 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-svc\") pod \"b33c7d9f-3d87-473f-b442-faeffed65c62\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") "
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.185334 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-config\") pod \"b33c7d9f-3d87-473f-b442-faeffed65c62\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") "
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.185432 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5h6j\" (UniqueName: \"kubernetes.io/projected/b33c7d9f-3d87-473f-b442-faeffed65c62-kube-api-access-g5h6j\") pod \"b33c7d9f-3d87-473f-b442-faeffed65c62\" (UID: \"b33c7d9f-3d87-473f-b442-faeffed65c62\") "
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.221495 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b33c7d9f-3d87-473f-b442-faeffed65c62-kube-api-access-g5h6j" (OuterVolumeSpecName: "kube-api-access-g5h6j") pod "b33c7d9f-3d87-473f-b442-faeffed65c62" (UID: "b33c7d9f-3d87-473f-b442-faeffed65c62"). InnerVolumeSpecName "kube-api-access-g5h6j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.244112 4854 scope.go:117] "RemoveContainer" containerID="0e07de8aeee86e3295322c7fff2756ab9347f33c7f606b93fb11b523a120b651"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.258817 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.289171 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5h6j\" (UniqueName: \"kubernetes.io/projected/b33c7d9f-3d87-473f-b442-faeffed65c62-kube-api-access-g5h6j\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.299714 4854 scope.go:117] "RemoveContainer" containerID="c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.316187 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.318455 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b33c7d9f-3d87-473f-b442-faeffed65c62" (UID: "b33c7d9f-3d87-473f-b442-faeffed65c62"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.332247 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:58:56 crc kubenswrapper[4854]: E1125 09:58:56.332989 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerName="dnsmasq-dns"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.333114 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerName="dnsmasq-dns"
Nov 25 09:58:56 crc kubenswrapper[4854]: E1125 09:58:56.333211 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerName="init"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.333283 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerName="init"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.333619 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b33c7d9f-3d87-473f-b442-faeffed65c62" containerName="dnsmasq-dns"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.335838 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-config" (OuterVolumeSpecName: "config") pod "b33c7d9f-3d87-473f-b442-faeffed65c62" (UID: "b33c7d9f-3d87-473f-b442-faeffed65c62"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.337037 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.339173 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.339421 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.346352 4854 scope.go:117] "RemoveContainer" containerID="3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.361622 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.369170 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b33c7d9f-3d87-473f-b442-faeffed65c62" (UID: "b33c7d9f-3d87-473f-b442-faeffed65c62"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.376575 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b33c7d9f-3d87-473f-b442-faeffed65c62" (UID: "b33c7d9f-3d87-473f-b442-faeffed65c62"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.404334 4854 scope.go:117] "RemoveContainer" containerID="c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.405439 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b33c7d9f-3d87-473f-b442-faeffed65c62" (UID: "b33c7d9f-3d87-473f-b442-faeffed65c62"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:58:56 crc kubenswrapper[4854]: E1125 09:58:56.409200 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9\": container with ID starting with c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9 not found: ID does not exist" containerID="c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.411504 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-l67tz"]
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.411526 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9"} err="failed to get container status \"c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9\": rpc error: code = NotFound desc = could not find container \"c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9\": container with ID starting with c3ae0a50706e1726b9ba172b6be2013a6e25d3179f2f535b146439c102f4b6b9 not found: ID does not exist"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.411595 4854 scope.go:117] "RemoveContainer" containerID="3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4"
Nov 25 09:58:56 crc kubenswrapper[4854]: E1125 09:58:56.412949 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4\": container with ID starting with 3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4 not found: ID does not exist" containerID="3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.413000 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4"} err="failed to get container status \"3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4\": rpc error: code = NotFound desc = could not find container \"3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4\": container with ID starting with 3045fab8051b564783e2fe8444deef7928fa3d48903982aaa3ab9decd76218a4 not found: ID does not exist"
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.421291 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.422276 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName:
\"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.423868 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.425895 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.425911 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b33c7d9f-3d87-473f-b442-faeffed65c62-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.485717 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-9zxbd"] Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.508178 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-9zxbd"] Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.531334 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.531395 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-config-data\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.531689 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-run-httpd\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.531708 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.531740 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-log-httpd\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.531897 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-scripts\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.532062 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8vjg\" (UniqueName: \"kubernetes.io/projected/4c578ce9-98b9-44f5-b090-c70d836bd2dc-kube-api-access-x8vjg\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.634816 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.634866 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-config-data\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.634888 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-run-httpd\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.634910 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.634950 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-log-httpd\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.635075 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-scripts\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.635141 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8vjg\" (UniqueName: \"kubernetes.io/projected/4c578ce9-98b9-44f5-b090-c70d836bd2dc-kube-api-access-x8vjg\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.636203 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-run-httpd\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.636592 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-log-httpd\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.640459 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.640905 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-config-data\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.641202 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.642488 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-scripts\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.653592 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8vjg\" (UniqueName: \"kubernetes.io/projected/4c578ce9-98b9-44f5-b090-c70d836bd2dc-kube-api-access-x8vjg\") pod \"ceilometer-0\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.672766 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:58:56 crc kubenswrapper[4854]: I1125 09:58:56.784777 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-84d577c77b-wtjcw"] Nov 25 09:58:56 crc kubenswrapper[4854]: W1125 09:58:56.789531 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2981e6fc_b8dc_45b4_a42d_4ccbd0372287.slice/crio-b19043b8e6666132b5e5a19bd16964672de2f1bf7c24609ec24a3d558d32dc71 WatchSource:0}: Error finding container b19043b8e6666132b5e5a19bd16964672de2f1bf7c24609ec24a3d558d32dc71: Status 404 returned error can't find the container with id b19043b8e6666132b5e5a19bd16964672de2f1bf7c24609ec24a3d558d32dc71 Nov 25 09:58:57 crc kubenswrapper[4854]: I1125 09:58:57.042703 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b33c7d9f-3d87-473f-b442-faeffed65c62" path="/var/lib/kubelet/pods/b33c7d9f-3d87-473f-b442-faeffed65c62/volumes" Nov 25 09:58:57 crc kubenswrapper[4854]: I1125 09:58:57.044642 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7153c0a-3527-49ba-a5f2-0b6f5b1b219d" path="/var/lib/kubelet/pods/c7153c0a-3527-49ba-a5f2-0b6f5b1b219d/volumes" Nov 25 09:58:57 crc kubenswrapper[4854]: I1125 09:58:57.166949 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84d577c77b-wtjcw" event={"ID":"2981e6fc-b8dc-45b4-a42d-4ccbd0372287","Type":"ContainerStarted","Data":"b19043b8e6666132b5e5a19bd16964672de2f1bf7c24609ec24a3d558d32dc71"} Nov 25 09:58:57 crc kubenswrapper[4854]: I1125 09:58:57.191214 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" event={"ID":"5ed971a6-e96d-409b-af33-1016e52a207f","Type":"ContainerStarted","Data":"e5f32de72db68fb2febcc995379ea1797a1943daefae6f7e6ab7928d519f5f4c"} Nov 25 09:58:57 crc kubenswrapper[4854]: I1125 09:58:57.191255 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" event={"ID":"5ed971a6-e96d-409b-af33-1016e52a207f","Type":"ContainerStarted","Data":"cbc86f49e547b302991c2eb7befafd197a0e870c1bf876c147bc12c1530f2d2b"} Nov 25 09:58:57 crc kubenswrapper[4854]: I1125 09:58:57.347179 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.175272 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6f86b9df97-x4wkv"] Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.177910 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.180597 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.180596 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.206044 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6f86b9df97-x4wkv"] Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.270939 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84d577c77b-wtjcw" event={"ID":"2981e6fc-b8dc-45b4-a42d-4ccbd0372287","Type":"ContainerStarted","Data":"5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465"} Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.270984 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84d577c77b-wtjcw" event={"ID":"2981e6fc-b8dc-45b4-a42d-4ccbd0372287","Type":"ContainerStarted","Data":"65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799"} Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.272843 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-84d577c77b-wtjcw" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.297197 4854 generic.go:334] "Generic (PLEG): container finished" podID="5ed971a6-e96d-409b-af33-1016e52a207f" containerID="e5f32de72db68fb2febcc995379ea1797a1943daefae6f7e6ab7928d519f5f4c" exitCode=0 Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.297434 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" event={"ID":"5ed971a6-e96d-409b-af33-1016e52a207f","Type":"ContainerDied","Data":"e5f32de72db68fb2febcc995379ea1797a1943daefae6f7e6ab7928d519f5f4c"} Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.326953 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerStarted","Data":"1d75ba3da4d66dde34223ad650be29be4bfc12123b834f3d5eeca086f79037f0"} Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.356387 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-httpd-config\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.356425 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-internal-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.356496 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-combined-ca-bundle\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.356534 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-config\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.356602 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-ovndb-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.356620 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-public-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.356831 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjxbw\" (UniqueName: \"kubernetes.io/projected/d0ba70f0-4fdd-461d-a0be-4af340f425a0-kube-api-access-hjxbw\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.390384 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-84d577c77b-wtjcw" podStartSLOduration=3.390361081 podStartE2EDuration="3.390361081s" podCreationTimestamp="2025-11-25 09:58:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:58.326119982 +0000 UTC m=+1344.179113358" watchObservedRunningTime="2025-11-25 09:58:58.390361081 +0000 UTC m=+1344.243354457" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.466560 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-httpd-config\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.466596 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-internal-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.466643 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-combined-ca-bundle\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.466683 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-config\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " 
pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.466718 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-ovndb-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.466737 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-public-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.466843 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjxbw\" (UniqueName: \"kubernetes.io/projected/d0ba70f0-4fdd-461d-a0be-4af340f425a0-kube-api-access-hjxbw\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.491307 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-ovndb-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.492334 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-combined-ca-bundle\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.493011 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-internal-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.493449 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-public-tls-certs\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.504827 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjxbw\" (UniqueName: \"kubernetes.io/projected/d0ba70f0-4fdd-461d-a0be-4af340f425a0-kube-api-access-hjxbw\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.520964 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-config\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.545093 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d0ba70f0-4fdd-461d-a0be-4af340f425a0-httpd-config\") pod \"neutron-6f86b9df97-x4wkv\" (UID: \"d0ba70f0-4fdd-461d-a0be-4af340f425a0\") " pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:58 crc kubenswrapper[4854]: I1125 09:58:58.795495 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.341098 4854 generic.go:334] "Generic (PLEG): container finished" podID="4f828059-1092-45cd-99a8-3915b6bab37f" containerID="ed75b623deadff67db66cc75a930a45b815889596ca105d7959096e1f197f93d" exitCode=0 Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.341323 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p594s" event={"ID":"4f828059-1092-45cd-99a8-3915b6bab37f","Type":"ContainerDied","Data":"ed75b623deadff67db66cc75a930a45b815889596ca105d7959096e1f197f93d"} Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.344075 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" event={"ID":"5ed971a6-e96d-409b-af33-1016e52a207f","Type":"ContainerStarted","Data":"f4bd7dcd6a3a6f0a10783c43cb0c6743e72d0cf19ee892df5226a7b29f6efc1f"} Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.344990 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.359625 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerStarted","Data":"07fa6808e7d442837eec22a291d7d4ef60ad575fcc9f76e36725af60420ee6f9"} Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.359685 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerStarted","Data":"cbe098ce8302ae887e0eaec0ea168c1c2ba324dfeba4bce7dff301b0f6f40221"} Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.424229 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" podStartSLOduration=4.424206891 podStartE2EDuration="4.424206891s" podCreationTimestamp="2025-11-25 09:58:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:58:59.404032856 +0000 UTC m=+1345.257026252" watchObservedRunningTime="2025-11-25 09:58:59.424206891 +0000 UTC m=+1345.277200267" Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.477834 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6f86b9df97-x4wkv"] Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.681031 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-dd6ff4df6-gfkgf" Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.885868 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-dd6ff4df6-gfkgf" Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.943140 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6ccd654df4-nc9k5"] Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.943720 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6ccd654df4-nc9k5" 
podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api-log" containerID="cri-o://4c84c9a8a19f148b87f3870f4fa29699562bf00f45ca8675e43ba44ff1b39041" gracePeriod=30 Nov 25 09:58:59 crc kubenswrapper[4854]: I1125 09:58:59.944161 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6ccd654df4-nc9k5" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api" containerID="cri-o://e1cb8af2ade8375c3a0b844d1c55892e9de4a7ece0369d8de7777968d354e756" gracePeriod=30 Nov 25 09:59:00 crc kubenswrapper[4854]: I1125 09:59:00.452599 4854 generic.go:334] "Generic (PLEG): container finished" podID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerID="4c84c9a8a19f148b87f3870f4fa29699562bf00f45ca8675e43ba44ff1b39041" exitCode=143 Nov 25 09:59:00 crc kubenswrapper[4854]: I1125 09:59:00.452961 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ccd654df4-nc9k5" event={"ID":"c0c3d44b-bac5-40fd-9edd-b6ee78913589","Type":"ContainerDied","Data":"4c84c9a8a19f148b87f3870f4fa29699562bf00f45ca8675e43ba44ff1b39041"} Nov 25 09:59:00 crc kubenswrapper[4854]: I1125 09:59:00.471511 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f86b9df97-x4wkv" event={"ID":"d0ba70f0-4fdd-461d-a0be-4af340f425a0","Type":"ContainerStarted","Data":"0cd8e765996a020323de3f0b6fc6078af17455442f6f9344b31ca5c16005f0f9"} Nov 25 09:59:00 crc kubenswrapper[4854]: I1125 09:59:00.471559 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f86b9df97-x4wkv" event={"ID":"d0ba70f0-4fdd-461d-a0be-4af340f425a0","Type":"ContainerStarted","Data":"7ad68987782e53585e33a80cc43adbf6732483492f95f999f8f7c1cf1a0b1660"} Nov 25 09:59:00 crc kubenswrapper[4854]: I1125 09:59:00.473986 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerStarted","Data":"f645ee2dd0ff6e6daa1dd8baf729653032b04a2e2a3920c7a458df3a63e983b3"} Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.112178 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-p594s" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.151007 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-db-sync-config-data\") pod \"4f828059-1092-45cd-99a8-3915b6bab37f\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.151120 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4dnk\" (UniqueName: \"kubernetes.io/projected/4f828059-1092-45cd-99a8-3915b6bab37f-kube-api-access-l4dnk\") pod \"4f828059-1092-45cd-99a8-3915b6bab37f\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.151183 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-combined-ca-bundle\") pod \"4f828059-1092-45cd-99a8-3915b6bab37f\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.151247 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-config-data\") pod \"4f828059-1092-45cd-99a8-3915b6bab37f\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.151278 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-scripts\") pod \"4f828059-1092-45cd-99a8-3915b6bab37f\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.151346 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f828059-1092-45cd-99a8-3915b6bab37f-etc-machine-id\") pod \"4f828059-1092-45cd-99a8-3915b6bab37f\" (UID: \"4f828059-1092-45cd-99a8-3915b6bab37f\") " Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.153454 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4f828059-1092-45cd-99a8-3915b6bab37f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4f828059-1092-45cd-99a8-3915b6bab37f" (UID: "4f828059-1092-45cd-99a8-3915b6bab37f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.161826 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4f828059-1092-45cd-99a8-3915b6bab37f" (UID: "4f828059-1092-45cd-99a8-3915b6bab37f"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.168044 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f828059-1092-45cd-99a8-3915b6bab37f-kube-api-access-l4dnk" (OuterVolumeSpecName: "kube-api-access-l4dnk") pod "4f828059-1092-45cd-99a8-3915b6bab37f" (UID: "4f828059-1092-45cd-99a8-3915b6bab37f"). InnerVolumeSpecName "kube-api-access-l4dnk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.176871 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-scripts" (OuterVolumeSpecName: "scripts") pod "4f828059-1092-45cd-99a8-3915b6bab37f" (UID: "4f828059-1092-45cd-99a8-3915b6bab37f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.213700 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f828059-1092-45cd-99a8-3915b6bab37f" (UID: "4f828059-1092-45cd-99a8-3915b6bab37f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.259974 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.260008 4854 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4f828059-1092-45cd-99a8-3915b6bab37f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.260019 4854 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.260029 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4dnk\" (UniqueName: \"kubernetes.io/projected/4f828059-1092-45cd-99a8-3915b6bab37f-kube-api-access-l4dnk\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.260038 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.277834 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-config-data" (OuterVolumeSpecName: "config-data") pod "4f828059-1092-45cd-99a8-3915b6bab37f" (UID: "4f828059-1092-45cd-99a8-3915b6bab37f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.362062 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f828059-1092-45cd-99a8-3915b6bab37f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.497151 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f86b9df97-x4wkv" event={"ID":"d0ba70f0-4fdd-461d-a0be-4af340f425a0","Type":"ContainerStarted","Data":"02164ffa73f3fe87459e724e5ce35e2e80298f286d300d120eb8b6e9d48bd945"} Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.498519 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.511010 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-p594s" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.511819 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-p594s" event={"ID":"4f828059-1092-45cd-99a8-3915b6bab37f","Type":"ContainerDied","Data":"5717fdbad0c1b64a48246b113f43120482ab1e3ff446dac18ab02d6733402b08"} Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.511860 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5717fdbad0c1b64a48246b113f43120482ab1e3ff446dac18ab02d6733402b08" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.545168 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6f86b9df97-x4wkv" podStartSLOduration=3.545137329 podStartE2EDuration="3.545137329s" podCreationTimestamp="2025-11-25 09:58:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:01.537367905 +0000 UTC m=+1347.390361291" watchObservedRunningTime="2025-11-25 09:59:01.545137329 +0000 UTC m=+1347.398130705" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.723294 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:01 crc kubenswrapper[4854]: E1125 09:59:01.723771 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f828059-1092-45cd-99a8-3915b6bab37f" containerName="cinder-db-sync" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.723783 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f828059-1092-45cd-99a8-3915b6bab37f" containerName="cinder-db-sync" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.724038 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f828059-1092-45cd-99a8-3915b6bab37f" containerName="cinder-db-sync" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.725334 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.738718 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.738986 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.739170 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-x5m65" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.742551 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.769868 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.782205 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.782275 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.782333 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.782350 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c7418fe-3812-4ca4-961a-01933db11279-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.782503 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwxdw\" (UniqueName: \"kubernetes.io/projected/2c7418fe-3812-4ca4-961a-01933db11279-kube-api-access-xwxdw\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.782627 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-scripts\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.816730 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-l67tz"] Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.887132 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.887424 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c7418fe-3812-4ca4-961a-01933db11279-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.887476 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwxdw\" (UniqueName: \"kubernetes.io/projected/2c7418fe-3812-4ca4-961a-01933db11279-kube-api-access-xwxdw\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.887528 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-scripts\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.887621 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.887664 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.889360 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c7418fe-3812-4ca4-961a-01933db11279-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.909746 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-wrpfb"] Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.911671 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.912132 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.941807 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-scripts\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.942194 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.949277 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.949913 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwxdw\" (UniqueName: \"kubernetes.io/projected/2c7418fe-3812-4ca4-961a-01933db11279-kube-api-access-xwxdw\") pod \"cinder-scheduler-0\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.952737 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.955120 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.960032 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.987758 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.989075 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.989130 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-svc\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.989177 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.989201 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-config\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.989242 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4h4t\" (UniqueName: \"kubernetes.io/projected/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-kube-api-access-d4h4t\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:01 crc kubenswrapper[4854]: I1125 09:59:01.989279 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.018740 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-wrpfb"] Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.054641 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.091112 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.091403 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vglh\" (UniqueName: \"kubernetes.io/projected/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-kube-api-access-8vglh\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.091536 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.091653 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.091775 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-logs\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.092705 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data-custom\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.092817 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.092935 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-svc\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.093058 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-scripts\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.093191 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.093303 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-config\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.093397 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.092822 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.095118 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-svc\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.095459 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4h4t\" (UniqueName: \"kubernetes.io/projected/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-kube-api-access-d4h4t\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.095498 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.095836 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-config\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.096483 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.123604 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4h4t\" (UniqueName: 
\"kubernetes.io/projected/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-kube-api-access-d4h4t\") pod \"dnsmasq-dns-6578955fd5-wrpfb\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.197013 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.197291 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.197315 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-logs\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.197380 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data-custom\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.197439 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-scripts\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.197505 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.197537 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vglh\" (UniqueName: \"kubernetes.io/projected/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-kube-api-access-8vglh\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.198091 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-logs\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.198214 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.199748 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.201807 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data-custom\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.210574 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.216068 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.216353 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-scripts\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.230965 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vglh\" (UniqueName: \"kubernetes.io/projected/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-kube-api-access-8vglh\") pod \"cinder-api-0\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.309364 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.568285 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" podUID="5ed971a6-e96d-409b-af33-1016e52a207f" containerName="dnsmasq-dns" containerID="cri-o://f4bd7dcd6a3a6f0a10783c43cb0c6743e72d0cf19ee892df5226a7b29f6efc1f" gracePeriod=10 Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.569432 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerStarted","Data":"a8e10cb8e7a05834745edafe9a3898e902c9ccc1ceccab9e38ac94e841a30c28"} Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.569459 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.601298 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.604995928 podStartE2EDuration="6.601278243s" podCreationTimestamp="2025-11-25 09:58:56 +0000 UTC" firstStartedPulling="2025-11-25 09:58:57.386312589 +0000 UTC m=+1343.239305965" lastFinishedPulling="2025-11-25 09:59:01.382594894 +0000 UTC m=+1347.235588280" observedRunningTime="2025-11-25 09:59:02.597737726 +0000 UTC m=+1348.450731112" watchObservedRunningTime="2025-11-25 09:59:02.601278243 +0000 UTC m=+1348.454271619" Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.739288 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:02 crc kubenswrapper[4854]: W1125 09:59:02.808876 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c7418fe_3812_4ca4_961a_01933db11279.slice/crio-2254ab6c681ee729346d4ca9cbccba7fabde1b34701b3c39a63ab8d28552e670 WatchSource:0}: Error finding container 2254ab6c681ee729346d4ca9cbccba7fabde1b34701b3c39a63ab8d28552e670: Status 404 returned error can't find the container with id 2254ab6c681ee729346d4ca9cbccba7fabde1b34701b3c39a63ab8d28552e670 Nov 25 09:59:02 crc kubenswrapper[4854]: I1125 09:59:02.810131 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c7l4t" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" probeResult="failure" output=< Nov 25 09:59:02 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:59:02 crc kubenswrapper[4854]: > Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.004168 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-wrpfb"] Nov 25 09:59:03 crc kubenswrapper[4854]: W1125 09:59:03.022782 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7c0062e_6699_45fa_a1fb_a9efb44a80e5.slice/crio-0a19ddae4c512a87d58e6cd6716c6896f5111471f666bd844f28bf3840fc37ab WatchSource:0}: Error finding container 0a19ddae4c512a87d58e6cd6716c6896f5111471f666bd844f28bf3840fc37ab: Status 404 returned error can't find the container with id 0a19ddae4c512a87d58e6cd6716c6896f5111471f666bd844f28bf3840fc37ab Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.314399 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.633286 4854 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" event={"ID":"f7c0062e-6699-45fa-a1fb-a9efb44a80e5","Type":"ContainerStarted","Data":"0a19ddae4c512a87d58e6cd6716c6896f5111471f666bd844f28bf3840fc37ab"} Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.638148 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9ffcff8-5138-4aff-b31c-178fbb3b54c5","Type":"ContainerStarted","Data":"dcd84894704200ae2c8c02fb47551c224ebd919ede946a668a969d6138b5e4c7"} Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.658875 4854 generic.go:334] "Generic (PLEG): container finished" podID="5ed971a6-e96d-409b-af33-1016e52a207f" containerID="f4bd7dcd6a3a6f0a10783c43cb0c6743e72d0cf19ee892df5226a7b29f6efc1f" exitCode=0 Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.659138 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" event={"ID":"5ed971a6-e96d-409b-af33-1016e52a207f","Type":"ContainerDied","Data":"f4bd7dcd6a3a6f0a10783c43cb0c6743e72d0cf19ee892df5226a7b29f6efc1f"} Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.659175 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" event={"ID":"5ed971a6-e96d-409b-af33-1016e52a207f","Type":"ContainerDied","Data":"cbc86f49e547b302991c2eb7befafd197a0e870c1bf876c147bc12c1530f2d2b"} Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.659189 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbc86f49e547b302991c2eb7befafd197a0e870c1bf876c147bc12c1530f2d2b" Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.696432 4854 generic.go:334] "Generic (PLEG): container finished" podID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerID="e1cb8af2ade8375c3a0b844d1c55892e9de4a7ece0369d8de7777968d354e756" exitCode=0 Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.696493 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ccd654df4-nc9k5" event={"ID":"c0c3d44b-bac5-40fd-9edd-b6ee78913589","Type":"ContainerDied","Data":"e1cb8af2ade8375c3a0b844d1c55892e9de4a7ece0369d8de7777968d354e756"} Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.700935 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c7418fe-3812-4ca4-961a-01933db11279","Type":"ContainerStarted","Data":"2254ab6c681ee729346d4ca9cbccba7fabde1b34701b3c39a63ab8d28552e670"} Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.776264 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.853508 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-nb\") pod \"5ed971a6-e96d-409b-af33-1016e52a207f\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.854183 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-swift-storage-0\") pod \"5ed971a6-e96d-409b-af33-1016e52a207f\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.854666 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-config\") pod \"5ed971a6-e96d-409b-af33-1016e52a207f\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.854865 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-sb\") pod \"5ed971a6-e96d-409b-af33-1016e52a207f\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.855075 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zq6j\" (UniqueName: \"kubernetes.io/projected/5ed971a6-e96d-409b-af33-1016e52a207f-kube-api-access-9zq6j\") pod \"5ed971a6-e96d-409b-af33-1016e52a207f\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.855221 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-svc\") pod \"5ed971a6-e96d-409b-af33-1016e52a207f\" (UID: \"5ed971a6-e96d-409b-af33-1016e52a207f\") " Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.957756 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ed971a6-e96d-409b-af33-1016e52a207f-kube-api-access-9zq6j" (OuterVolumeSpecName: "kube-api-access-9zq6j") pod "5ed971a6-e96d-409b-af33-1016e52a207f" (UID: "5ed971a6-e96d-409b-af33-1016e52a207f"). InnerVolumeSpecName "kube-api-access-9zq6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:03 crc kubenswrapper[4854]: I1125 09:59:03.960219 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zq6j\" (UniqueName: \"kubernetes.io/projected/5ed971a6-e96d-409b-af33-1016e52a207f-kube-api-access-9zq6j\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.001592 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5ed971a6-e96d-409b-af33-1016e52a207f" (UID: "5ed971a6-e96d-409b-af33-1016e52a207f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.065362 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.091377 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-config" (OuterVolumeSpecName: "config") pod "5ed971a6-e96d-409b-af33-1016e52a207f" (UID: "5ed971a6-e96d-409b-af33-1016e52a207f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.092159 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5ed971a6-e96d-409b-af33-1016e52a207f" (UID: "5ed971a6-e96d-409b-af33-1016e52a207f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.124251 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5ed971a6-e96d-409b-af33-1016e52a207f" (UID: "5ed971a6-e96d-409b-af33-1016e52a207f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.146707 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5ed971a6-e96d-409b-af33-1016e52a207f" (UID: "5ed971a6-e96d-409b-af33-1016e52a207f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.155965 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.179850 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data-custom\") pod \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.180006 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data\") pod \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.180152 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0c3d44b-bac5-40fd-9edd-b6ee78913589-logs\") pod \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.180279 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k2v5\" (UniqueName: \"kubernetes.io/projected/c0c3d44b-bac5-40fd-9edd-b6ee78913589-kube-api-access-8k2v5\") pod \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.180393 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-combined-ca-bundle\") pod \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\" (UID: \"c0c3d44b-bac5-40fd-9edd-b6ee78913589\") " Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.181205 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.181227 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.181239 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.181247 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ed971a6-e96d-409b-af33-1016e52a207f-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.182200 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0c3d44b-bac5-40fd-9edd-b6ee78913589-logs" (OuterVolumeSpecName: "logs") pod "c0c3d44b-bac5-40fd-9edd-b6ee78913589" (UID: "c0c3d44b-bac5-40fd-9edd-b6ee78913589"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.199872 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c0c3d44b-bac5-40fd-9edd-b6ee78913589" (UID: "c0c3d44b-bac5-40fd-9edd-b6ee78913589"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.218523 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0c3d44b-bac5-40fd-9edd-b6ee78913589-kube-api-access-8k2v5" (OuterVolumeSpecName: "kube-api-access-8k2v5") pod "c0c3d44b-bac5-40fd-9edd-b6ee78913589" (UID: "c0c3d44b-bac5-40fd-9edd-b6ee78913589"). InnerVolumeSpecName "kube-api-access-8k2v5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.235814 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0c3d44b-bac5-40fd-9edd-b6ee78913589" (UID: "c0c3d44b-bac5-40fd-9edd-b6ee78913589"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.293648 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.293921 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.293981 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0c3d44b-bac5-40fd-9edd-b6ee78913589-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.294037 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k2v5\" (UniqueName: \"kubernetes.io/projected/c0c3d44b-bac5-40fd-9edd-b6ee78913589-kube-api-access-8k2v5\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.334831 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data" (OuterVolumeSpecName: "config-data") pod "c0c3d44b-bac5-40fd-9edd-b6ee78913589" (UID: "c0c3d44b-bac5-40fd-9edd-b6ee78913589"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.395862 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c3d44b-bac5-40fd-9edd-b6ee78913589-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.745693 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ccd654df4-nc9k5" event={"ID":"c0c3d44b-bac5-40fd-9edd-b6ee78913589","Type":"ContainerDied","Data":"c4de81c7bbe2759f55db0e2224f5e8b8e0c778da56598b576569b0d341f70483"} Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.746031 4854 scope.go:117] "RemoveContainer" containerID="e1cb8af2ade8375c3a0b844d1c55892e9de4a7ece0369d8de7777968d354e756" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.745807 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6ccd654df4-nc9k5" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.757271 4854 generic.go:334] "Generic (PLEG): container finished" podID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerID="eef673bc8954710d47f2671f29b11efa2f1d67980fc641ff528d145d0e5bd48b" exitCode=0 Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.757937 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" event={"ID":"f7c0062e-6699-45fa-a1fb-a9efb44a80e5","Type":"ContainerDied","Data":"eef673bc8954710d47f2671f29b11efa2f1d67980fc641ff528d145d0e5bd48b"} Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.758094 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-l67tz" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.844842 4854 scope.go:117] "RemoveContainer" containerID="4c84c9a8a19f148b87f3870f4fa29699562bf00f45ca8675e43ba44ff1b39041" Nov 25 09:59:04 crc kubenswrapper[4854]: I1125 09:59:04.983725 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-l67tz"] Nov 25 09:59:05 crc kubenswrapper[4854]: I1125 09:59:05.052037 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-l67tz"] Nov 25 09:59:05 crc kubenswrapper[4854]: I1125 09:59:05.052084 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6ccd654df4-nc9k5"] Nov 25 09:59:05 crc kubenswrapper[4854]: I1125 09:59:05.073588 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6ccd654df4-nc9k5"] Nov 25 09:59:05 crc kubenswrapper[4854]: I1125 09:59:05.177747 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:05 crc kubenswrapper[4854]: I1125 09:59:05.821260 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c7418fe-3812-4ca4-961a-01933db11279","Type":"ContainerStarted","Data":"7415472a45c9905175ceb9af6a7062311221f17433d1f3005d65e97270f2ff72"} Nov 25 09:59:05 crc kubenswrapper[4854]: I1125 09:59:05.836337 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" event={"ID":"f7c0062e-6699-45fa-a1fb-a9efb44a80e5","Type":"ContainerStarted","Data":"be34493ffb6896f322a2bab960edbfc290b1fe6331eda0bcf305befe492a80b3"} Nov 25 09:59:05 crc kubenswrapper[4854]: I1125 09:59:05.836549 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:05 crc 
kubenswrapper[4854]: I1125 09:59:05.845849 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9ffcff8-5138-4aff-b31c-178fbb3b54c5","Type":"ContainerStarted","Data":"e72cd43b24b3558b323395cf6ce07f9ac4f5c4eb79b0b19739bd6c3f0d22ee44"} Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.022514 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.049188 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-557b4bfdc4-lt7bk" Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.055874 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" podStartSLOduration=5.055847604 podStartE2EDuration="5.055847604s" podCreationTimestamp="2025-11-25 09:59:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:05.855560011 +0000 UTC m=+1351.708553397" watchObservedRunningTime="2025-11-25 09:59:06.055847604 +0000 UTC m=+1351.908840980" Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.858577 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c7418fe-3812-4ca4-961a-01933db11279","Type":"ContainerStarted","Data":"9cad3284a3832524fc0120084d4c5e45a653bd57a79ca13646e88b0953b3bb31"} Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.860976 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9ffcff8-5138-4aff-b31c-178fbb3b54c5","Type":"ContainerStarted","Data":"e0de8775b24823616bbf4b6afd954926c5e4fdd9898cc77f3e7f8e7d3ddd0674"} Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.861087 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api-log" containerID="cri-o://e72cd43b24b3558b323395cf6ce07f9ac4f5c4eb79b0b19739bd6c3f0d22ee44" gracePeriod=30 Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.861218 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api" containerID="cri-o://e0de8775b24823616bbf4b6afd954926c5e4fdd9898cc77f3e7f8e7d3ddd0674" gracePeriod=30 Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.861485 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.886232 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.697994053 podStartE2EDuration="5.886207994s" podCreationTimestamp="2025-11-25 09:59:01 +0000 UTC" firstStartedPulling="2025-11-25 09:59:02.8518058 +0000 UTC m=+1348.704799176" lastFinishedPulling="2025-11-25 09:59:04.040019741 +0000 UTC m=+1349.893013117" observedRunningTime="2025-11-25 09:59:06.88245887 +0000 UTC m=+1352.735452246" watchObservedRunningTime="2025-11-25 09:59:06.886207994 +0000 UTC m=+1352.739201370" Nov 25 09:59:06 crc kubenswrapper[4854]: I1125 09:59:06.909408 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.909387882 podStartE2EDuration="5.909387882s" podCreationTimestamp="2025-11-25 09:59:01 +0000 
UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:06.90312821 +0000 UTC m=+1352.756121586" watchObservedRunningTime="2025-11-25 09:59:06.909387882 +0000 UTC m=+1352.762381258" Nov 25 09:59:07 crc kubenswrapper[4854]: I1125 09:59:07.024783 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ed971a6-e96d-409b-af33-1016e52a207f" path="/var/lib/kubelet/pods/5ed971a6-e96d-409b-af33-1016e52a207f/volumes" Nov 25 09:59:07 crc kubenswrapper[4854]: I1125 09:59:07.025535 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" path="/var/lib/kubelet/pods/c0c3d44b-bac5-40fd-9edd-b6ee78913589/volumes" Nov 25 09:59:07 crc kubenswrapper[4854]: I1125 09:59:07.055609 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 09:59:07 crc kubenswrapper[4854]: I1125 09:59:07.877868 4854 generic.go:334] "Generic (PLEG): container finished" podID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerID="e0de8775b24823616bbf4b6afd954926c5e4fdd9898cc77f3e7f8e7d3ddd0674" exitCode=0 Nov 25 09:59:07 crc kubenswrapper[4854]: I1125 09:59:07.878160 4854 generic.go:334] "Generic (PLEG): container finished" podID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerID="e72cd43b24b3558b323395cf6ce07f9ac4f5c4eb79b0b19739bd6c3f0d22ee44" exitCode=143 Nov 25 09:59:07 crc kubenswrapper[4854]: I1125 09:59:07.877926 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9ffcff8-5138-4aff-b31c-178fbb3b54c5","Type":"ContainerDied","Data":"e0de8775b24823616bbf4b6afd954926c5e4fdd9898cc77f3e7f8e7d3ddd0674"} Nov 25 09:59:07 crc kubenswrapper[4854]: I1125 09:59:07.879195 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9ffcff8-5138-4aff-b31c-178fbb3b54c5","Type":"ContainerDied","Data":"e72cd43b24b3558b323395cf6ce07f9ac4f5c4eb79b0b19739bd6c3f0d22ee44"} Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.261974 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.353396 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-combined-ca-bundle\") pod \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.353486 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vglh\" (UniqueName: \"kubernetes.io/projected/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-kube-api-access-8vglh\") pod \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.353606 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data\") pod \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.353660 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-scripts\") pod \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.353728 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data-custom\") pod \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.353764 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-etc-machine-id\") pod \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.353822 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-logs\") pod \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\" (UID: \"e9ffcff8-5138-4aff-b31c-178fbb3b54c5\") " Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.354597 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-logs" (OuterVolumeSpecName: "logs") pod "e9ffcff8-5138-4aff-b31c-178fbb3b54c5" (UID: "e9ffcff8-5138-4aff-b31c-178fbb3b54c5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.354638 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e9ffcff8-5138-4aff-b31c-178fbb3b54c5" (UID: "e9ffcff8-5138-4aff-b31c-178fbb3b54c5"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.364864 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-scripts" (OuterVolumeSpecName: "scripts") pod "e9ffcff8-5138-4aff-b31c-178fbb3b54c5" (UID: "e9ffcff8-5138-4aff-b31c-178fbb3b54c5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.376950 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e9ffcff8-5138-4aff-b31c-178fbb3b54c5" (UID: "e9ffcff8-5138-4aff-b31c-178fbb3b54c5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.389905 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-kube-api-access-8vglh" (OuterVolumeSpecName: "kube-api-access-8vglh") pod "e9ffcff8-5138-4aff-b31c-178fbb3b54c5" (UID: "e9ffcff8-5138-4aff-b31c-178fbb3b54c5"). InnerVolumeSpecName "kube-api-access-8vglh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.462253 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.466711 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.466871 4854 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.466928 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.466982 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vglh\" (UniqueName: \"kubernetes.io/projected/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-kube-api-access-8vglh\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.483806 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data" (OuterVolumeSpecName: "config-data") pod "e9ffcff8-5138-4aff-b31c-178fbb3b54c5" (UID: "e9ffcff8-5138-4aff-b31c-178fbb3b54c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.502938 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9ffcff8-5138-4aff-b31c-178fbb3b54c5" (UID: "e9ffcff8-5138-4aff-b31c-178fbb3b54c5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.569324 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.569352 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ffcff8-5138-4aff-b31c-178fbb3b54c5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.657306 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6ccd654df4-nc9k5" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.197:9311/healthcheck\": dial tcp 10.217.0.197:9311: i/o timeout (Client.Timeout exceeded while awaiting headers)" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.657323 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6ccd654df4-nc9k5" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.197:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.904508 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9ffcff8-5138-4aff-b31c-178fbb3b54c5","Type":"ContainerDied","Data":"dcd84894704200ae2c8c02fb47551c224ebd919ede946a668a969d6138b5e4c7"} Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.904586 4854 scope.go:117] "RemoveContainer" containerID="e0de8775b24823616bbf4b6afd954926c5e4fdd9898cc77f3e7f8e7d3ddd0674" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.905685 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.937981 4854 scope.go:117] "RemoveContainer" containerID="e72cd43b24b3558b323395cf6ce07f9ac4f5c4eb79b0b19739bd6c3f0d22ee44" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.948521 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.965782 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.993644 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:08 crc kubenswrapper[4854]: E1125 09:59:08.994072 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994089 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api" Nov 25 09:59:08 crc kubenswrapper[4854]: E1125 09:59:08.994135 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed971a6-e96d-409b-af33-1016e52a207f" containerName="dnsmasq-dns" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994142 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed971a6-e96d-409b-af33-1016e52a207f" containerName="dnsmasq-dns" Nov 25 09:59:08 crc kubenswrapper[4854]: E1125 09:59:08.994148 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api-log" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994155 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api-log" Nov 25 09:59:08 crc kubenswrapper[4854]: E1125 09:59:08.994167 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api-log" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994173 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api-log" Nov 25 09:59:08 crc kubenswrapper[4854]: E1125 09:59:08.994187 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994194 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api" Nov 25 09:59:08 crc kubenswrapper[4854]: E1125 09:59:08.994213 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed971a6-e96d-409b-af33-1016e52a207f" containerName="init" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994218 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed971a6-e96d-409b-af33-1016e52a207f" containerName="init" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994412 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api-log" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994424 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ed971a6-e96d-409b-af33-1016e52a207f" containerName="dnsmasq-dns" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994442 4854 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994454 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" containerName="cinder-api-log" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.994463 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c3d44b-bac5-40fd-9edd-b6ee78913589" containerName="barbican-api" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.995596 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.997907 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.998018 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 09:59:08 crc kubenswrapper[4854]: I1125 09:59:08.998352 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.046729 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9ffcff8-5138-4aff-b31c-178fbb3b54c5" path="/var/lib/kubelet/pods/e9ffcff8-5138-4aff-b31c-178fbb3b54c5/volumes" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.047489 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.180735 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-scripts\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.180828 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd9acce9-fd86-43e8-8359-0a31686cdc7a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.180860 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.180884 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.180925 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4468\" (UniqueName: \"kubernetes.io/projected/cd9acce9-fd86-43e8-8359-0a31686cdc7a-kube-api-access-g4468\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.180970 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.181008 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-config-data\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.181031 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9acce9-fd86-43e8-8359-0a31686cdc7a-logs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.181194 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283617 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-scripts\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283689 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd9acce9-fd86-43e8-8359-0a31686cdc7a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283720 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283739 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283773 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4468\" (UniqueName: \"kubernetes.io/projected/cd9acce9-fd86-43e8-8359-0a31686cdc7a-kube-api-access-g4468\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283813 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 
09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283813 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd9acce9-fd86-43e8-8359-0a31686cdc7a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.283845 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-config-data\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.284358 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9acce9-fd86-43e8-8359-0a31686cdc7a-logs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.284841 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.285715 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd9acce9-fd86-43e8-8359-0a31686cdc7a-logs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.290184 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-scripts\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.290242 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.290913 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-config-data\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.291866 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.296562 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.302152 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd9acce9-fd86-43e8-8359-0a31686cdc7a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.302652 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4468\" (UniqueName: \"kubernetes.io/projected/cd9acce9-fd86-43e8-8359-0a31686cdc7a-kube-api-access-g4468\") pod \"cinder-api-0\" (UID: \"cd9acce9-fd86-43e8-8359-0a31686cdc7a\") " pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.339868 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.670206 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6775cd4556-vz69t" Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.892425 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:59:09 crc kubenswrapper[4854]: I1125 09:59:09.925634 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd9acce9-fd86-43e8-8359-0a31686cdc7a","Type":"ContainerStarted","Data":"15edd26452cb54c935b22cfc1da0ce6f1b528d80bf1a4cc16bf92489f6300c85"} Nov 25 09:59:10 crc kubenswrapper[4854]: I1125 09:59:10.939007 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd9acce9-fd86-43e8-8359-0a31686cdc7a","Type":"ContainerStarted","Data":"2c8263a2360d8d56908c3a7031fcf0808ae1827bb39a5a43726ca3c6c4361958"} Nov 25 09:59:11 crc kubenswrapper[4854]: I1125 09:59:11.953620 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd9acce9-fd86-43e8-8359-0a31686cdc7a","Type":"ContainerStarted","Data":"9fc0cce113bb7f456d5c40ac676a7d60ba6cfcdbc313270450ca1dc725c5e982"} Nov 25 09:59:11 crc kubenswrapper[4854]: I1125 09:59:11.954229 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 09:59:11 crc kubenswrapper[4854]: I1125 09:59:11.989839 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.989816321 podStartE2EDuration="3.989816321s" podCreationTimestamp="2025-11-25 09:59:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:11.977626226 +0000 UTC m=+1357.830619612" watchObservedRunningTime="2025-11-25 09:59:11.989816321 +0000 UTC m=+1357.842809697" Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.201792 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.263809 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-fd5ht"] Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.264386 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" podUID="4eaa166f-f908-46ac-87db-627ca7c64013" containerName="dnsmasq-dns" containerID="cri-o://fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3" gracePeriod=10 Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.387551 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/cinder-scheduler-0" Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.441902 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.764138 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c7l4t" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" probeResult="failure" output=< Nov 25 09:59:12 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:59:12 crc kubenswrapper[4854]: > Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.909585 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.973367 4854 generic.go:334] "Generic (PLEG): container finished" podID="4eaa166f-f908-46ac-87db-627ca7c64013" containerID="fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3" exitCode=0 Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.973427 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.973434 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" event={"ID":"4eaa166f-f908-46ac-87db-627ca7c64013","Type":"ContainerDied","Data":"fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3"} Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.973716 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-fd5ht" event={"ID":"4eaa166f-f908-46ac-87db-627ca7c64013","Type":"ContainerDied","Data":"ae9e0c32040cdafac834137a146ab6404c86bdaa417c1b46906ea728ebf03a8c"} Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.973750 4854 scope.go:117] "RemoveContainer" containerID="fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3" Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.974053 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="cinder-scheduler" containerID="cri-o://7415472a45c9905175ceb9af6a7062311221f17433d1f3005d65e97270f2ff72" gracePeriod=30 Nov 25 09:59:12 crc kubenswrapper[4854]: I1125 09:59:12.974192 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="probe" containerID="cri-o://9cad3284a3832524fc0120084d4c5e45a653bd57a79ca13646e88b0953b3bb31" gracePeriod=30 Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.003059 4854 scope.go:117] "RemoveContainer" containerID="9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.036002 4854 scope.go:117] "RemoveContainer" containerID="fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3" Nov 25 09:59:13 crc kubenswrapper[4854]: E1125 09:59:13.036427 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3\": container with ID starting with fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3 not found: ID does not exist" 
containerID="fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.036518 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3"} err="failed to get container status \"fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3\": rpc error: code = NotFound desc = could not find container \"fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3\": container with ID starting with fd62a5b4558aeeb06120ef1c1dafa0a62bbd6a6d87135493810e373877a16ef3 not found: ID does not exist" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.036553 4854 scope.go:117] "RemoveContainer" containerID="9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8" Nov 25 09:59:13 crc kubenswrapper[4854]: E1125 09:59:13.036818 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8\": container with ID starting with 9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8 not found: ID does not exist" containerID="9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.036843 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8"} err="failed to get container status \"9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8\": rpc error: code = NotFound desc = could not find container \"9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8\": container with ID starting with 9f740c6d25f4372d0e324f10bb9f465b2b14fa184ee76e75139047148f697bb8 not found: ID does not exist" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.082803 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-swift-storage-0\") pod \"4eaa166f-f908-46ac-87db-627ca7c64013\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.082870 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-svc\") pod \"4eaa166f-f908-46ac-87db-627ca7c64013\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.083050 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-config\") pod \"4eaa166f-f908-46ac-87db-627ca7c64013\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.083121 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmplg\" (UniqueName: \"kubernetes.io/projected/4eaa166f-f908-46ac-87db-627ca7c64013-kube-api-access-mmplg\") pod \"4eaa166f-f908-46ac-87db-627ca7c64013\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.083188 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-sb\") pod \"4eaa166f-f908-46ac-87db-627ca7c64013\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.083224 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-nb\") pod \"4eaa166f-f908-46ac-87db-627ca7c64013\" (UID: \"4eaa166f-f908-46ac-87db-627ca7c64013\") " Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.089917 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4eaa166f-f908-46ac-87db-627ca7c64013-kube-api-access-mmplg" (OuterVolumeSpecName: "kube-api-access-mmplg") pod "4eaa166f-f908-46ac-87db-627ca7c64013" (UID: "4eaa166f-f908-46ac-87db-627ca7c64013"). InnerVolumeSpecName "kube-api-access-mmplg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.158387 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4eaa166f-f908-46ac-87db-627ca7c64013" (UID: "4eaa166f-f908-46ac-87db-627ca7c64013"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.163253 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4eaa166f-f908-46ac-87db-627ca7c64013" (UID: "4eaa166f-f908-46ac-87db-627ca7c64013"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.180817 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4eaa166f-f908-46ac-87db-627ca7c64013" (UID: "4eaa166f-f908-46ac-87db-627ca7c64013"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.184311 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-config" (OuterVolumeSpecName: "config") pod "4eaa166f-f908-46ac-87db-627ca7c64013" (UID: "4eaa166f-f908-46ac-87db-627ca7c64013"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.186556 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.187084 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmplg\" (UniqueName: \"kubernetes.io/projected/4eaa166f-f908-46ac-87db-627ca7c64013-kube-api-access-mmplg\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.187173 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.187239 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.187296 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.187854 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4eaa166f-f908-46ac-87db-627ca7c64013" (UID: "4eaa166f-f908-46ac-87db-627ca7c64013"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.289220 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4eaa166f-f908-46ac-87db-627ca7c64013-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.306947 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-fd5ht"] Nov 25 09:59:13 crc kubenswrapper[4854]: I1125 09:59:13.321752 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-fd5ht"] Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.020862 4854 generic.go:334] "Generic (PLEG): container finished" podID="2c7418fe-3812-4ca4-961a-01933db11279" containerID="9cad3284a3832524fc0120084d4c5e45a653bd57a79ca13646e88b0953b3bb31" exitCode=0 Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.021208 4854 generic.go:334] "Generic (PLEG): container finished" podID="2c7418fe-3812-4ca4-961a-01933db11279" containerID="7415472a45c9905175ceb9af6a7062311221f17433d1f3005d65e97270f2ff72" exitCode=0 Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.021313 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c7418fe-3812-4ca4-961a-01933db11279","Type":"ContainerDied","Data":"9cad3284a3832524fc0120084d4c5e45a653bd57a79ca13646e88b0953b3bb31"} Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.021351 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c7418fe-3812-4ca4-961a-01933db11279","Type":"ContainerDied","Data":"7415472a45c9905175ceb9af6a7062311221f17433d1f3005d65e97270f2ff72"} Nov 25 
09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.036565 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5dbbcc5579-hmqdg"] Nov 25 09:59:14 crc kubenswrapper[4854]: E1125 09:59:14.037897 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4eaa166f-f908-46ac-87db-627ca7c64013" containerName="dnsmasq-dns" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.037918 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="4eaa166f-f908-46ac-87db-627ca7c64013" containerName="dnsmasq-dns" Nov 25 09:59:14 crc kubenswrapper[4854]: E1125 09:59:14.038000 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4eaa166f-f908-46ac-87db-627ca7c64013" containerName="init" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.038009 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="4eaa166f-f908-46ac-87db-627ca7c64013" containerName="init" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.039908 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="4eaa166f-f908-46ac-87db-627ca7c64013" containerName="dnsmasq-dns" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.047099 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.056470 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.056812 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.057736 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.082374 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5dbbcc5579-hmqdg"] Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.218353 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/755d55c2-0eaa-4186-bd25-00e8c34166be-etc-swift\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.218414 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-combined-ca-bundle\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.218532 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-public-tls-certs\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.218898 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-config-data\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " 
pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.219085 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc9bf\" (UniqueName: \"kubernetes.io/projected/755d55c2-0eaa-4186-bd25-00e8c34166be-kube-api-access-gc9bf\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.219275 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-internal-tls-certs\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.219493 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/755d55c2-0eaa-4186-bd25-00e8c34166be-run-httpd\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.219629 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/755d55c2-0eaa-4186-bd25-00e8c34166be-log-httpd\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322066 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-config-data\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322150 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc9bf\" (UniqueName: \"kubernetes.io/projected/755d55c2-0eaa-4186-bd25-00e8c34166be-kube-api-access-gc9bf\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322205 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-internal-tls-certs\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322239 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/755d55c2-0eaa-4186-bd25-00e8c34166be-run-httpd\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322281 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/755d55c2-0eaa-4186-bd25-00e8c34166be-log-httpd\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: 
\"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322331 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/755d55c2-0eaa-4186-bd25-00e8c34166be-etc-swift\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322358 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-combined-ca-bundle\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322415 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-public-tls-certs\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.322854 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/755d55c2-0eaa-4186-bd25-00e8c34166be-log-httpd\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.324200 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/755d55c2-0eaa-4186-bd25-00e8c34166be-run-httpd\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.335963 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-internal-tls-certs\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.337586 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-public-tls-certs\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.340165 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-config-data\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.348784 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc9bf\" (UniqueName: \"kubernetes.io/projected/755d55c2-0eaa-4186-bd25-00e8c34166be-kube-api-access-gc9bf\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc 
kubenswrapper[4854]: I1125 09:59:14.348902 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/755d55c2-0eaa-4186-bd25-00e8c34166be-combined-ca-bundle\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.352271 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/755d55c2-0eaa-4186-bd25-00e8c34166be-etc-swift\") pod \"swift-proxy-5dbbcc5579-hmqdg\" (UID: \"755d55c2-0eaa-4186-bd25-00e8c34166be\") " pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.352544 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.385196 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.426642 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-scripts\") pod \"2c7418fe-3812-4ca4-961a-01933db11279\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.426755 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwxdw\" (UniqueName: \"kubernetes.io/projected/2c7418fe-3812-4ca4-961a-01933db11279-kube-api-access-xwxdw\") pod \"2c7418fe-3812-4ca4-961a-01933db11279\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.426818 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data-custom\") pod \"2c7418fe-3812-4ca4-961a-01933db11279\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.426854 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c7418fe-3812-4ca4-961a-01933db11279-etc-machine-id\") pod \"2c7418fe-3812-4ca4-961a-01933db11279\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.426884 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data\") pod \"2c7418fe-3812-4ca4-961a-01933db11279\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.427097 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-combined-ca-bundle\") pod \"2c7418fe-3812-4ca4-961a-01933db11279\" (UID: \"2c7418fe-3812-4ca4-961a-01933db11279\") " Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.427829 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2c7418fe-3812-4ca4-961a-01933db11279-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2c7418fe-3812-4ca4-961a-01933db11279" (UID: 
"2c7418fe-3812-4ca4-961a-01933db11279"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.446812 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-scripts" (OuterVolumeSpecName: "scripts") pod "2c7418fe-3812-4ca4-961a-01933db11279" (UID: "2c7418fe-3812-4ca4-961a-01933db11279"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.476340 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2c7418fe-3812-4ca4-961a-01933db11279" (UID: "2c7418fe-3812-4ca4-961a-01933db11279"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.492958 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c7418fe-3812-4ca4-961a-01933db11279-kube-api-access-xwxdw" (OuterVolumeSpecName: "kube-api-access-xwxdw") pod "2c7418fe-3812-4ca4-961a-01933db11279" (UID: "2c7418fe-3812-4ca4-961a-01933db11279"). InnerVolumeSpecName "kube-api-access-xwxdw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.533562 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.533604 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwxdw\" (UniqueName: \"kubernetes.io/projected/2c7418fe-3812-4ca4-961a-01933db11279-kube-api-access-xwxdw\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.533618 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.533629 4854 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c7418fe-3812-4ca4-961a-01933db11279-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.650515 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 09:59:14 crc kubenswrapper[4854]: E1125 09:59:14.651279 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="probe" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.651374 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="probe" Nov 25 09:59:14 crc kubenswrapper[4854]: E1125 09:59:14.651457 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="cinder-scheduler" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.651509 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="cinder-scheduler" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.651834 4854 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="probe" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.651906 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c7418fe-3812-4ca4-961a-01933db11279" containerName="cinder-scheduler" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.660146 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.673516 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.673571 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-hz8v6" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.673820 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.700882 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c7418fe-3812-4ca4-961a-01933db11279" (UID: "2c7418fe-3812-4ca4-961a-01933db11279"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.711045 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data" (OuterVolumeSpecName: "config-data") pod "2c7418fe-3812-4ca4-961a-01933db11279" (UID: "2c7418fe-3812-4ca4-961a-01933db11279"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.716296 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.747256 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.747408 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.747549 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config-secret\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.747629 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tnhq\" (UniqueName: \"kubernetes.io/projected/32764040-6453-4e02-ab7c-6ed23dec831c-kube-api-access-6tnhq\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.747948 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.747966 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7418fe-3812-4ca4-961a-01933db11279-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.849643 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.849787 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config-secret\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.849828 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tnhq\" (UniqueName: \"kubernetes.io/projected/32764040-6453-4e02-ab7c-6ed23dec831c-kube-api-access-6tnhq\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.849968 4854 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.852100 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.853638 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.853852 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config-secret\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.870102 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tnhq\" (UniqueName: \"kubernetes.io/projected/32764040-6453-4e02-ab7c-6ed23dec831c-kube-api-access-6tnhq\") pod \"openstackclient\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.927708 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.928757 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.945481 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.976899 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.978425 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 09:59:14 crc kubenswrapper[4854]: I1125 09:59:14.997379 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.037267 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4eaa166f-f908-46ac-87db-627ca7c64013" path="/var/lib/kubelet/pods/4eaa166f-f908-46ac-87db-627ca7c64013/volumes" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.054704 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b91ef9a0-60a5-4dd9-9239-a784c885f332-openstack-config\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.054772 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b91ef9a0-60a5-4dd9-9239-a784c885f332-openstack-config-secret\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.055520 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlqhk\" (UniqueName: \"kubernetes.io/projected/b91ef9a0-60a5-4dd9-9239-a784c885f332-kube-api-access-zlqhk\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.055991 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b91ef9a0-60a5-4dd9-9239-a784c885f332-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.082534 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c7418fe-3812-4ca4-961a-01933db11279","Type":"ContainerDied","Data":"2254ab6c681ee729346d4ca9cbccba7fabde1b34701b3c39a63ab8d28552e670"} Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.082614 4854 scope.go:117] "RemoveContainer" containerID="9cad3284a3832524fc0120084d4c5e45a653bd57a79ca13646e88b0953b3bb31" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.082825 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.111283 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.136019 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.150172 4854 scope.go:117] "RemoveContainer" containerID="7415472a45c9905175ceb9af6a7062311221f17433d1f3005d65e97270f2ff72" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.158638 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlqhk\" (UniqueName: \"kubernetes.io/projected/b91ef9a0-60a5-4dd9-9239-a784c885f332-kube-api-access-zlqhk\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.158865 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b91ef9a0-60a5-4dd9-9239-a784c885f332-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.159041 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b91ef9a0-60a5-4dd9-9239-a784c885f332-openstack-config\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.159082 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b91ef9a0-60a5-4dd9-9239-a784c885f332-openstack-config-secret\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.161392 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b91ef9a0-60a5-4dd9-9239-a784c885f332-openstack-config\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.161890 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.172003 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b91ef9a0-60a5-4dd9-9239-a784c885f332-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.174306 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.174458 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b91ef9a0-60a5-4dd9-9239-a784c885f332-openstack-config-secret\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.179148 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.180897 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.183848 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlqhk\" (UniqueName: \"kubernetes.io/projected/b91ef9a0-60a5-4dd9-9239-a784c885f332-kube-api-access-zlqhk\") pod \"openstackclient\" (UID: \"b91ef9a0-60a5-4dd9-9239-a784c885f332\") " pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: E1125 09:59:15.197700 4854 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 25 09:59:15 crc kubenswrapper[4854]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_32764040-6453-4e02-ab7c-6ed23dec831c_0(535a598a0619fc53a385f934e1c4354993c7340dadbb54cc6b59425fe779513b): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"535a598a0619fc53a385f934e1c4354993c7340dadbb54cc6b59425fe779513b" Netns:"/var/run/netns/aacd3903-1d5f-4adc-8410-ca2bf6bfc4b3" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=535a598a0619fc53a385f934e1c4354993c7340dadbb54cc6b59425fe779513b;K8S_POD_UID=32764040-6453-4e02-ab7c-6ed23dec831c" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/32764040-6453-4e02-ab7c-6ed23dec831c]: expected pod UID "32764040-6453-4e02-ab7c-6ed23dec831c" but got "b91ef9a0-60a5-4dd9-9239-a784c885f332" from Kube API Nov 25 09:59:15 crc kubenswrapper[4854]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 25 09:59:15 crc kubenswrapper[4854]: > Nov 25 09:59:15 crc kubenswrapper[4854]: E1125 09:59:15.197759 4854 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 25 09:59:15 crc kubenswrapper[4854]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_32764040-6453-4e02-ab7c-6ed23dec831c_0(535a598a0619fc53a385f934e1c4354993c7340dadbb54cc6b59425fe779513b): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"535a598a0619fc53a385f934e1c4354993c7340dadbb54cc6b59425fe779513b" Netns:"/var/run/netns/aacd3903-1d5f-4adc-8410-ca2bf6bfc4b3" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=535a598a0619fc53a385f934e1c4354993c7340dadbb54cc6b59425fe779513b;K8S_POD_UID=32764040-6453-4e02-ab7c-6ed23dec831c" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/32764040-6453-4e02-ab7c-6ed23dec831c]: expected pod UID "32764040-6453-4e02-ab7c-6ed23dec831c" but got "b91ef9a0-60a5-4dd9-9239-a784c885f332" from Kube API Nov 25 09:59:15 crc kubenswrapper[4854]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 25 09:59:15 crc kubenswrapper[4854]: > pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.225459 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5dbbcc5579-hmqdg"] Nov 25 09:59:15 crc kubenswrapper[4854]: W1125 09:59:15.234141 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod755d55c2_0eaa_4186_bd25_00e8c34166be.slice/crio-e05aa10070dce8577bf804180b322cbac541bfdff24de9016bb07c309a03dfc1 WatchSource:0}: Error finding container e05aa10070dce8577bf804180b322cbac541bfdff24de9016bb07c309a03dfc1: Status 404 returned error can't find the container with id e05aa10070dce8577bf804180b322cbac541bfdff24de9016bb07c309a03dfc1 Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.261454 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.261515 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.261811 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfc85811-fd01-48df-99bf-1220134a32b2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.262025 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-scripts\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.262104 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-config-data\") pod \"cinder-scheduler-0\" (UID: 
\"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.262252 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpdkk\" (UniqueName: \"kubernetes.io/projected/bfc85811-fd01-48df-99bf-1220134a32b2-kube-api-access-qpdkk\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.299651 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.363729 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpdkk\" (UniqueName: \"kubernetes.io/projected/bfc85811-fd01-48df-99bf-1220134a32b2-kube-api-access-qpdkk\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.363861 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.363887 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.363953 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfc85811-fd01-48df-99bf-1220134a32b2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.364019 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-scripts\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.364052 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-config-data\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.364249 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfc85811-fd01-48df-99bf-1220134a32b2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.367461 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") 
" pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.367512 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.367771 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-scripts\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.369353 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfc85811-fd01-48df-99bf-1220134a32b2-config-data\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.385398 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpdkk\" (UniqueName: \"kubernetes.io/projected/bfc85811-fd01-48df-99bf-1220134a32b2-kube-api-access-qpdkk\") pod \"cinder-scheduler-0\" (UID: \"bfc85811-fd01-48df-99bf-1220134a32b2\") " pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.502723 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:59:15 crc kubenswrapper[4854]: I1125 09:59:15.832755 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.095923 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" event={"ID":"755d55c2-0eaa-4186-bd25-00e8c34166be","Type":"ContainerStarted","Data":"7ba36936b227106e49c6868f7cca4bcbee95004c3efb8cf185e188eb9f32e39c"} Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.096276 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.096288 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" event={"ID":"755d55c2-0eaa-4186-bd25-00e8c34166be","Type":"ContainerStarted","Data":"644d7ec7d2bfa5809649956cc2208692b0c462dfa4f08a79247070c379ea293d"} Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.096301 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" event={"ID":"755d55c2-0eaa-4186-bd25-00e8c34166be","Type":"ContainerStarted","Data":"e05aa10070dce8577bf804180b322cbac541bfdff24de9016bb07c309a03dfc1"} Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.096315 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.096912 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b91ef9a0-60a5-4dd9-9239-a784c885f332","Type":"ContainerStarted","Data":"81ac3457dd2115086640a67641f55aabece775657438b0504602c4dd94808976"} Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.096955 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.108615 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.125894 4854 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="32764040-6453-4e02-ab7c-6ed23dec831c" podUID="b91ef9a0-60a5-4dd9-9239-a784c885f332" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.128928 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" podStartSLOduration=3.128907377 podStartE2EDuration="3.128907377s" podCreationTimestamp="2025-11-25 09:59:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:16.117815841 +0000 UTC m=+1361.970809227" watchObservedRunningTime="2025-11-25 09:59:16.128907377 +0000 UTC m=+1361.981900753" Nov 25 09:59:16 crc kubenswrapper[4854]: W1125 09:59:16.166851 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfc85811_fd01_48df_99bf_1220134a32b2.slice/crio-2d82535f3e9e0278a1df7159815a35f16f245d29c7db5b47c8813ef732984cf7 WatchSource:0}: Error finding container 2d82535f3e9e0278a1df7159815a35f16f245d29c7db5b47c8813ef732984cf7: Status 404 returned error can't find the container with id 2d82535f3e9e0278a1df7159815a35f16f245d29c7db5b47c8813ef732984cf7 Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.169723 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.182856 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-combined-ca-bundle\") pod \"32764040-6453-4e02-ab7c-6ed23dec831c\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.182937 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config-secret\") pod \"32764040-6453-4e02-ab7c-6ed23dec831c\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.183011 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tnhq\" (UniqueName: \"kubernetes.io/projected/32764040-6453-4e02-ab7c-6ed23dec831c-kube-api-access-6tnhq\") pod \"32764040-6453-4e02-ab7c-6ed23dec831c\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.183184 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config\") pod \"32764040-6453-4e02-ab7c-6ed23dec831c\" (UID: \"32764040-6453-4e02-ab7c-6ed23dec831c\") " Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.185002 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "32764040-6453-4e02-ab7c-6ed23dec831c" (UID: 
"32764040-6453-4e02-ab7c-6ed23dec831c"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.191881 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "32764040-6453-4e02-ab7c-6ed23dec831c" (UID: "32764040-6453-4e02-ab7c-6ed23dec831c"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.197846 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32764040-6453-4e02-ab7c-6ed23dec831c" (UID: "32764040-6453-4e02-ab7c-6ed23dec831c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.200924 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32764040-6453-4e02-ab7c-6ed23dec831c-kube-api-access-6tnhq" (OuterVolumeSpecName: "kube-api-access-6tnhq") pod "32764040-6453-4e02-ab7c-6ed23dec831c" (UID: "32764040-6453-4e02-ab7c-6ed23dec831c"). InnerVolumeSpecName "kube-api-access-6tnhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.288135 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.288177 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.288190 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tnhq\" (UniqueName: \"kubernetes.io/projected/32764040-6453-4e02-ab7c-6ed23dec831c-kube-api-access-6tnhq\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:16 crc kubenswrapper[4854]: I1125 09:59:16.288201 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/32764040-6453-4e02-ab7c-6ed23dec831c-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:17 crc kubenswrapper[4854]: I1125 09:59:17.031101 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c7418fe-3812-4ca4-961a-01933db11279" path="/var/lib/kubelet/pods/2c7418fe-3812-4ca4-961a-01933db11279/volumes" Nov 25 09:59:17 crc kubenswrapper[4854]: I1125 09:59:17.032193 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32764040-6453-4e02-ab7c-6ed23dec831c" path="/var/lib/kubelet/pods/32764040-6453-4e02-ab7c-6ed23dec831c/volumes" Nov 25 09:59:17 crc kubenswrapper[4854]: I1125 09:59:17.116015 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfc85811-fd01-48df-99bf-1220134a32b2","Type":"ContainerStarted","Data":"26cd49acd025860074cd5b43fc3a83086d8dcdcdf40edb742c87c9a7b9f0586e"} Nov 25 09:59:17 crc kubenswrapper[4854]: I1125 09:59:17.116326 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-scheduler-0" event={"ID":"bfc85811-fd01-48df-99bf-1220134a32b2","Type":"ContainerStarted","Data":"2d82535f3e9e0278a1df7159815a35f16f245d29c7db5b47c8813ef732984cf7"} Nov 25 09:59:17 crc kubenswrapper[4854]: I1125 09:59:17.116041 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:59:17 crc kubenswrapper[4854]: I1125 09:59:17.127810 4854 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="32764040-6453-4e02-ab7c-6ed23dec831c" podUID="b91ef9a0-60a5-4dd9-9239-a784c885f332" Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.144099 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfc85811-fd01-48df-99bf-1220134a32b2","Type":"ContainerStarted","Data":"cf485df164a7a40670e983f6b9f29c8dee1e1c41ad9055529d8e10b8bd86cc50"} Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.162517 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.16250153 podStartE2EDuration="3.16250153s" podCreationTimestamp="2025-11-25 09:59:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:18.161161333 +0000 UTC m=+1364.014154709" watchObservedRunningTime="2025-11-25 09:59:18.16250153 +0000 UTC m=+1364.015494896" Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.437553 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.438266 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-central-agent" containerID="cri-o://cbe098ce8302ae887e0eaec0ea168c1c2ba324dfeba4bce7dff301b0f6f40221" gracePeriod=30 Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.438535 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="proxy-httpd" containerID="cri-o://a8e10cb8e7a05834745edafe9a3898e902c9ccc1ceccab9e38ac94e841a30c28" gracePeriod=30 Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.438602 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="sg-core" containerID="cri-o://f645ee2dd0ff6e6daa1dd8baf729653032b04a2e2a3920c7a458df3a63e983b3" gracePeriod=30 Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.438715 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-notification-agent" containerID="cri-o://07fa6808e7d442837eec22a291d7d4ef60ad575fcc9f76e36725af60420ee6f9" gracePeriod=30 Nov 25 09:59:18 crc kubenswrapper[4854]: I1125 09:59:18.444851 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 09:59:19 crc kubenswrapper[4854]: I1125 09:59:19.188750 4854 generic.go:334] "Generic (PLEG): container finished" podID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerID="a8e10cb8e7a05834745edafe9a3898e902c9ccc1ceccab9e38ac94e841a30c28" exitCode=0 Nov 25 09:59:19 crc kubenswrapper[4854]: I1125 09:59:19.189032 4854 generic.go:334] 
"Generic (PLEG): container finished" podID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerID="f645ee2dd0ff6e6daa1dd8baf729653032b04a2e2a3920c7a458df3a63e983b3" exitCode=2 Nov 25 09:59:19 crc kubenswrapper[4854]: I1125 09:59:19.189047 4854 generic.go:334] "Generic (PLEG): container finished" podID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerID="cbe098ce8302ae887e0eaec0ea168c1c2ba324dfeba4bce7dff301b0f6f40221" exitCode=0 Nov 25 09:59:19 crc kubenswrapper[4854]: I1125 09:59:19.188843 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerDied","Data":"a8e10cb8e7a05834745edafe9a3898e902c9ccc1ceccab9e38ac94e841a30c28"} Nov 25 09:59:19 crc kubenswrapper[4854]: I1125 09:59:19.189385 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerDied","Data":"f645ee2dd0ff6e6daa1dd8baf729653032b04a2e2a3920c7a458df3a63e983b3"} Nov 25 09:59:19 crc kubenswrapper[4854]: I1125 09:59:19.189414 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerDied","Data":"cbe098ce8302ae887e0eaec0ea168c1c2ba324dfeba4bce7dff301b0f6f40221"} Nov 25 09:59:20 crc kubenswrapper[4854]: I1125 09:59:20.504559 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 09:59:20 crc kubenswrapper[4854]: I1125 09:59:20.530062 4854 generic.go:334] "Generic (PLEG): container finished" podID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerID="07fa6808e7d442837eec22a291d7d4ef60ad575fcc9f76e36725af60420ee6f9" exitCode=0 Nov 25 09:59:20 crc kubenswrapper[4854]: I1125 09:59:20.531768 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerDied","Data":"07fa6808e7d442837eec22a291d7d4ef60ad575fcc9f76e36725af60420ee6f9"} Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.071090 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.099889 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-scripts\") pod \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.099948 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-config-data\") pod \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.100014 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-log-httpd\") pod \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.100038 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-combined-ca-bundle\") pod \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.100072 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8vjg\" (UniqueName: \"kubernetes.io/projected/4c578ce9-98b9-44f5-b090-c70d836bd2dc-kube-api-access-x8vjg\") pod \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.100108 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-run-httpd\") pod \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.100130 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-sg-core-conf-yaml\") pod \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\" (UID: \"4c578ce9-98b9-44f5-b090-c70d836bd2dc\") " Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.101824 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4c578ce9-98b9-44f5-b090-c70d836bd2dc" (UID: "4c578ce9-98b9-44f5-b090-c70d836bd2dc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.102824 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4c578ce9-98b9-44f5-b090-c70d836bd2dc" (UID: "4c578ce9-98b9-44f5-b090-c70d836bd2dc"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.124311 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-scripts" (OuterVolumeSpecName: "scripts") pod "4c578ce9-98b9-44f5-b090-c70d836bd2dc" (UID: "4c578ce9-98b9-44f5-b090-c70d836bd2dc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.129525 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c578ce9-98b9-44f5-b090-c70d836bd2dc-kube-api-access-x8vjg" (OuterVolumeSpecName: "kube-api-access-x8vjg") pod "4c578ce9-98b9-44f5-b090-c70d836bd2dc" (UID: "4c578ce9-98b9-44f5-b090-c70d836bd2dc"). InnerVolumeSpecName "kube-api-access-x8vjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.205761 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.205808 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.205832 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8vjg\" (UniqueName: \"kubernetes.io/projected/4c578ce9-98b9-44f5-b090-c70d836bd2dc-kube-api-access-x8vjg\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.205844 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c578ce9-98b9-44f5-b090-c70d836bd2dc-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.207272 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4c578ce9-98b9-44f5-b090-c70d836bd2dc" (UID: "4c578ce9-98b9-44f5-b090-c70d836bd2dc"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.266447 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c578ce9-98b9-44f5-b090-c70d836bd2dc" (UID: "4c578ce9-98b9-44f5-b090-c70d836bd2dc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.303435 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-config-data" (OuterVolumeSpecName: "config-data") pod "4c578ce9-98b9-44f5-b090-c70d836bd2dc" (UID: "4c578ce9-98b9-44f5-b090-c70d836bd2dc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.306997 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.307059 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.307073 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c578ce9-98b9-44f5-b090-c70d836bd2dc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.545022 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c578ce9-98b9-44f5-b090-c70d836bd2dc","Type":"ContainerDied","Data":"1d75ba3da4d66dde34223ad650be29be4bfc12123b834f3d5eeca086f79037f0"} Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.545090 4854 scope.go:117] "RemoveContainer" containerID="a8e10cb8e7a05834745edafe9a3898e902c9ccc1ceccab9e38ac94e841a30c28" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.545298 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.613721 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.627661 4854 scope.go:117] "RemoveContainer" containerID="f645ee2dd0ff6e6daa1dd8baf729653032b04a2e2a3920c7a458df3a63e983b3" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.635405 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679148 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:21 crc kubenswrapper[4854]: E1125 09:59:21.679605 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-notification-agent" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679623 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-notification-agent" Nov 25 09:59:21 crc kubenswrapper[4854]: E1125 09:59:21.679655 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="sg-core" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679661 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="sg-core" Nov 25 09:59:21 crc kubenswrapper[4854]: E1125 09:59:21.679691 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="proxy-httpd" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679699 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="proxy-httpd" Nov 25 09:59:21 crc kubenswrapper[4854]: E1125 09:59:21.679715 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-central-agent" Nov 25 09:59:21 crc 
kubenswrapper[4854]: I1125 09:59:21.679721 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-central-agent" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679913 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="proxy-httpd" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679923 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="sg-core" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679940 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-notification-agent" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.679961 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" containerName="ceilometer-central-agent" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.681896 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.688807 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.689050 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.717438 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.764848 4854 scope.go:117] "RemoveContainer" containerID="07fa6808e7d442837eec22a291d7d4ef60ad575fcc9f76e36725af60420ee6f9" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.824196 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-config-data\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.824246 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-run-httpd\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.824277 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.824390 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr2pc\" (UniqueName: \"kubernetes.io/projected/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-kube-api-access-hr2pc\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.824409 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-log-httpd\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.824605 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-scripts\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.824706 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.871907 4854 scope.go:117] "RemoveContainer" containerID="cbe098ce8302ae887e0eaec0ea168c1c2ba324dfeba4bce7dff301b0f6f40221" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.928598 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-scripts\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.928712 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.928774 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-config-data\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.928817 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-run-httpd\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.928859 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.929134 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr2pc\" (UniqueName: \"kubernetes.io/projected/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-kube-api-access-hr2pc\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.929156 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-log-httpd\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " 
pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.929692 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-log-httpd\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.929834 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-run-httpd\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.933502 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-scripts\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.935573 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-config-data\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.937291 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.941529 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.946730 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:21 crc kubenswrapper[4854]: I1125 09:59:21.947924 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr2pc\" (UniqueName: \"kubernetes.io/projected/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-kube-api-access-hr2pc\") pod \"ceilometer-0\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " pod="openstack/ceilometer-0" Nov 25 09:59:22 crc kubenswrapper[4854]: I1125 09:59:22.002814 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:22 crc kubenswrapper[4854]: I1125 09:59:22.805690 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c7l4t" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" probeResult="failure" output=< Nov 25 09:59:22 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:59:22 crc kubenswrapper[4854]: > Nov 25 09:59:22 crc kubenswrapper[4854]: I1125 09:59:22.833665 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:22 crc kubenswrapper[4854]: W1125 09:59:22.846646 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0f90516_2fdf_4c1a_86e2_ea4626d8329f.slice/crio-5ff6f0ebae2cf874d7dd207c637bce4046557c4ff1ded83cf0f371894cf6aaa3 WatchSource:0}: Error finding container 5ff6f0ebae2cf874d7dd207c637bce4046557c4ff1ded83cf0f371894cf6aaa3: Status 404 returned error can't find the container with id 5ff6f0ebae2cf874d7dd207c637bce4046557c4ff1ded83cf0f371894cf6aaa3 Nov 25 09:59:23 crc kubenswrapper[4854]: I1125 09:59:23.026609 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c578ce9-98b9-44f5-b090-c70d836bd2dc" path="/var/lib/kubelet/pods/4c578ce9-98b9-44f5-b090-c70d836bd2dc/volumes" Nov 25 09:59:23 crc kubenswrapper[4854]: I1125 09:59:23.571251 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerStarted","Data":"5ff6f0ebae2cf874d7dd207c637bce4046557c4ff1ded83cf0f371894cf6aaa3"} Nov 25 09:59:24 crc kubenswrapper[4854]: I1125 09:59:24.395954 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:24 crc kubenswrapper[4854]: I1125 09:59:24.396807 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" Nov 25 09:59:25 crc kubenswrapper[4854]: I1125 09:59:25.028527 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:59:25 crc kubenswrapper[4854]: I1125 09:59:25.028900 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:59:25 crc kubenswrapper[4854]: I1125 09:59:25.698399 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-84d577c77b-wtjcw" Nov 25 09:59:25 crc kubenswrapper[4854]: I1125 09:59:25.920742 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.650254 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p2hhj"] Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.653043 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.661848 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2hhj"] Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.746845 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-utilities\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.747005 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-catalog-content\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.747072 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9dts\" (UniqueName: \"kubernetes.io/projected/f323116e-620c-40d7-8cc7-55315cd06335-kube-api-access-m9dts\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.849094 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-catalog-content\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.849203 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9dts\" (UniqueName: \"kubernetes.io/projected/f323116e-620c-40d7-8cc7-55315cd06335-kube-api-access-m9dts\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.849267 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-utilities\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.849892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-utilities\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.849910 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-catalog-content\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.868043 4854 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-m9dts\" (UniqueName: \"kubernetes.io/projected/f323116e-620c-40d7-8cc7-55315cd06335-kube-api-access-m9dts\") pod \"redhat-marketplace-p2hhj\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:26 crc kubenswrapper[4854]: I1125 09:59:26.996598 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:28 crc kubenswrapper[4854]: I1125 09:59:28.810855 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6f86b9df97-x4wkv" Nov 25 09:59:28 crc kubenswrapper[4854]: I1125 09:59:28.878743 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-84d577c77b-wtjcw"] Nov 25 09:59:28 crc kubenswrapper[4854]: I1125 09:59:28.879374 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-84d577c77b-wtjcw" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-httpd" containerID="cri-o://65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799" gracePeriod=30 Nov 25 09:59:28 crc kubenswrapper[4854]: I1125 09:59:28.879326 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-84d577c77b-wtjcw" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-api" containerID="cri-o://5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465" gracePeriod=30 Nov 25 09:59:29 crc kubenswrapper[4854]: I1125 09:59:29.644154 4854 generic.go:334] "Generic (PLEG): container finished" podID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerID="65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799" exitCode=0 Nov 25 09:59:29 crc kubenswrapper[4854]: I1125 09:59:29.644198 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84d577c77b-wtjcw" event={"ID":"2981e6fc-b8dc-45b4-a42d-4ccbd0372287","Type":"ContainerDied","Data":"65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799"} Nov 25 09:59:30 crc kubenswrapper[4854]: I1125 09:59:30.763349 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:30.996153 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-795866c477-qft6s"] Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.004310 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.011926 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.012181 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-g9249" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.012971 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.057393 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-795866c477-qft6s"] Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.087342 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data-custom\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.087463 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45x6q\" (UniqueName: \"kubernetes.io/projected/fa6d4a82-7a62-446e-bd21-394a6ef687c1-kube-api-access-45x6q\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.090058 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-combined-ca-bundle\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.090349 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.182979 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-k6p96"] Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.185369 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.195504 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.195796 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc7dz\" (UniqueName: \"kubernetes.io/projected/9ff2c539-c2ac-4420-b11e-ba3c88af56be-kube-api-access-bc7dz\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.195874 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.195974 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.196081 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data-custom\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.196161 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.196325 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45x6q\" (UniqueName: \"kubernetes.io/projected/fa6d4a82-7a62-446e-bd21-394a6ef687c1-kube-api-access-45x6q\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.196436 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.196608 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-config\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.204825 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-combined-ca-bundle\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.212614 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data-custom\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.230731 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.265994 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-combined-ca-bundle\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.269054 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-k6p96"] Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.287804 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45x6q\" (UniqueName: \"kubernetes.io/projected/fa6d4a82-7a62-446e-bd21-394a6ef687c1-kube-api-access-45x6q\") pod \"heat-engine-795866c477-qft6s\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.325285 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.327730 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc7dz\" (UniqueName: \"kubernetes.io/projected/9ff2c539-c2ac-4420-b11e-ba3c88af56be-kube-api-access-bc7dz\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.327769 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.327797 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.327823 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.327873 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.327915 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-config\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.334565 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.335806 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.336350 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.342104 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.348416 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-config\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.358712 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc7dz\" (UniqueName: \"kubernetes.io/projected/9ff2c539-c2ac-4420-b11e-ba3c88af56be-kube-api-access-bc7dz\") pod \"dnsmasq-dns-688b9f5b49-k6p96\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " pod="openstack/dnsmasq-dns-688b9f5b49-k6p96"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.376005 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-6d5696cb69-gnxts"]
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.386873 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.395498 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.399891 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6d5696cb69-gnxts"]
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.418871 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-55dc74c94f-t88f4"]
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.420623 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-55dc74c94f-t88f4"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.431043 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwkll\" (UniqueName: \"kubernetes.io/projected/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-kube-api-access-mwkll\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.431262 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.431403 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data-custom\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.431444 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-combined-ca-bundle\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.437312 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-55dc74c94f-t88f4"]
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.447703 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.556366 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.556458 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data-custom\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.556665 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4675\" (UniqueName: \"kubernetes.io/projected/b8f14245-4267-4921-996d-6d192b4c9953-kube-api-access-l4675\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4"
Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.556922 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data-custom\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
\"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.556972 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-combined-ca-bundle\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.557099 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-combined-ca-bundle\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.557207 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwkll\" (UniqueName: \"kubernetes.io/projected/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-kube-api-access-mwkll\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.557253 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.579657 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data-custom\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.587665 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.598258 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-combined-ca-bundle\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.610601 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwkll\" (UniqueName: \"kubernetes.io/projected/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-kube-api-access-mwkll\") pod \"heat-cfnapi-6d5696cb69-gnxts\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.656697 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.663834 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data-custom\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.664554 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4675\" (UniqueName: \"kubernetes.io/projected/b8f14245-4267-4921-996d-6d192b4c9953-kube-api-access-l4675\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.667429 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-combined-ca-bundle\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.673185 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.675051 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data-custom\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.690392 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-combined-ca-bundle\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.695607 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4675\" (UniqueName: \"kubernetes.io/projected/b8f14245-4267-4921-996d-6d192b4c9953-kube-api-access-l4675\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.696988 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data\") pod \"heat-api-55dc74c94f-t88f4\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.724381 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.748029 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b91ef9a0-60a5-4dd9-9239-a784c885f332","Type":"ContainerStarted","Data":"40e417feb8ea49cf83ffb49e61942465ed96aba7eba9d22b64975a992970c3ca"} Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.767284 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2hhj"] Nov 25 09:59:31 crc kubenswrapper[4854]: W1125 09:59:31.781959 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf323116e_620c_40d7_8cc7_55315cd06335.slice/crio-666606f3a882f4079221ab2d657c2fc11f7dd84b4946ab1f1055b353575527ea WatchSource:0}: Error finding container 666606f3a882f4079221ab2d657c2fc11f7dd84b4946ab1f1055b353575527ea: Status 404 returned error can't find the container with id 666606f3a882f4079221ab2d657c2fc11f7dd84b4946ab1f1055b353575527ea Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.799047 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerStarted","Data":"be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d"} Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.801476 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.683289723 podStartE2EDuration="17.801457262s" podCreationTimestamp="2025-11-25 09:59:14 +0000 UTC" firstStartedPulling="2025-11-25 09:59:15.851732297 +0000 UTC m=+1361.704725663" lastFinishedPulling="2025-11-25 09:59:30.969899796 +0000 UTC m=+1376.822893202" observedRunningTime="2025-11-25 09:59:31.790315564 +0000 UTC m=+1377.643308940" watchObservedRunningTime="2025-11-25 09:59:31.801457262 +0000 UTC m=+1377.654450638" Nov 25 09:59:31 crc kubenswrapper[4854]: I1125 09:59:31.842585 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.084904 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-795866c477-qft6s"] Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.130743 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xgtsh"] Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.134064 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.149125 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xgtsh"] Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.184241 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-catalog-content\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.184713 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pprf7\" (UniqueName: \"kubernetes.io/projected/3405ae80-37c0-433b-99aa-f9e233d61d86-kube-api-access-pprf7\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.184820 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-utilities\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.288075 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pprf7\" (UniqueName: \"kubernetes.io/projected/3405ae80-37c0-433b-99aa-f9e233d61d86-kube-api-access-pprf7\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.288563 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-utilities\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.288820 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-catalog-content\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.289451 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-utilities\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.289635 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-catalog-content\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.313198 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pprf7\" (UniqueName: \"kubernetes.io/projected/3405ae80-37c0-433b-99aa-f9e233d61d86-kube-api-access-pprf7\") pod \"community-operators-xgtsh\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.447134 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-k6p96"] Nov 25 09:59:32 crc kubenswrapper[4854]: W1125 09:59:32.468849 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ff2c539_c2ac_4420_b11e_ba3c88af56be.slice/crio-fa010a79ae8e0c040d0e0ef4fe454a2096e707dfb137f8fb08b6de756aab50da WatchSource:0}: Error finding container fa010a79ae8e0c040d0e0ef4fe454a2096e707dfb137f8fb08b6de756aab50da: Status 404 returned error can't find the container with id fa010a79ae8e0c040d0e0ef4fe454a2096e707dfb137f8fb08b6de756aab50da Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.530281 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.637918 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6d5696cb69-gnxts"] Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.732634 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c7l4t" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" probeResult="failure" output=< Nov 25 09:59:32 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:59:32 crc kubenswrapper[4854]: > Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.743457 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-55dc74c94f-t88f4"] Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.840187 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" event={"ID":"9ff2c539-c2ac-4420-b11e-ba3c88af56be","Type":"ContainerStarted","Data":"fa010a79ae8e0c040d0e0ef4fe454a2096e707dfb137f8fb08b6de756aab50da"} Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.844659 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55dc74c94f-t88f4" event={"ID":"b8f14245-4267-4921-996d-6d192b4c9953","Type":"ContainerStarted","Data":"2811c941f75005cc0849e6b37c2343bb829765aa3d584f45a3eafb60b59e2c8d"} Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.863433 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" event={"ID":"964f5abd-0c6c-47cf-82ca-ea31aaf2b522","Type":"ContainerStarted","Data":"0bbb3429731b7139ebed87acfe9153d296c325f5a9354f29705dccdefd7472f6"} Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.914619 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-795866c477-qft6s" event={"ID":"fa6d4a82-7a62-446e-bd21-394a6ef687c1","Type":"ContainerStarted","Data":"187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684"} Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.914956 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-795866c477-qft6s" event={"ID":"fa6d4a82-7a62-446e-bd21-394a6ef687c1","Type":"ContainerStarted","Data":"76ddaf799a261b67eeb26ea288e56906d54ac8b7220c1da1464bde02c7346207"} Nov 25 09:59:32 crc 
kubenswrapper[4854]: I1125 09:59:32.916234 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.958268 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerStarted","Data":"dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32"} Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.960363 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-795866c477-qft6s" podStartSLOduration=2.960349667 podStartE2EDuration="2.960349667s" podCreationTimestamp="2025-11-25 09:59:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:32.949472266 +0000 UTC m=+1378.802465662" watchObservedRunningTime="2025-11-25 09:59:32.960349667 +0000 UTC m=+1378.813343043" Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.965835 4854 generic.go:334] "Generic (PLEG): container finished" podID="f323116e-620c-40d7-8cc7-55315cd06335" containerID="93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224" exitCode=0 Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.965903 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2hhj" event={"ID":"f323116e-620c-40d7-8cc7-55315cd06335","Type":"ContainerDied","Data":"93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224"} Nov 25 09:59:32 crc kubenswrapper[4854]: I1125 09:59:32.965933 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2hhj" event={"ID":"f323116e-620c-40d7-8cc7-55315cd06335","Type":"ContainerStarted","Data":"666606f3a882f4079221ab2d657c2fc11f7dd84b4946ab1f1055b353575527ea"} Nov 25 09:59:33 crc kubenswrapper[4854]: I1125 09:59:33.434935 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xgtsh"] Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.016410 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerStarted","Data":"a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb"} Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.023183 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2hhj" event={"ID":"f323116e-620c-40d7-8cc7-55315cd06335","Type":"ContainerStarted","Data":"a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2"} Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.037335 4854 generic.go:334] "Generic (PLEG): container finished" podID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerID="ad3ce8348ec444b7bf52af5f8a59b448785767344363526226c975826aba8488" exitCode=0 Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.037401 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" event={"ID":"9ff2c539-c2ac-4420-b11e-ba3c88af56be","Type":"ContainerDied","Data":"ad3ce8348ec444b7bf52af5f8a59b448785767344363526226c975826aba8488"} Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.043722 4854 generic.go:334] "Generic (PLEG): container finished" podID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerID="71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155" exitCode=0 Nov 25 
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.044464 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgtsh" event={"ID":"3405ae80-37c0-433b-99aa-f9e233d61d86","Type":"ContainerDied","Data":"71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155"}
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.044489 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgtsh" event={"ID":"3405ae80-37c0-433b-99aa-f9e233d61d86","Type":"ContainerStarted","Data":"f12ac0894e63c6887007774d3cd9777e62797e66bff40c1f69102ff24c419fec"}
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.741049 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-84d577c77b-wtjcw"
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.793782 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-httpd-config\") pod \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") "
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.793975 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-config\") pod \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") "
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.794094 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-ovndb-tls-certs\") pod \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") "
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.794188 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbsdx\" (UniqueName: \"kubernetes.io/projected/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-kube-api-access-zbsdx\") pod \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") "
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.794234 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-combined-ca-bundle\") pod \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\" (UID: \"2981e6fc-b8dc-45b4-a42d-4ccbd0372287\") "
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.803976 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-kube-api-access-zbsdx" (OuterVolumeSpecName: "kube-api-access-zbsdx") pod "2981e6fc-b8dc-45b4-a42d-4ccbd0372287" (UID: "2981e6fc-b8dc-45b4-a42d-4ccbd0372287"). InnerVolumeSpecName "kube-api-access-zbsdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.809996 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "2981e6fc-b8dc-45b4-a42d-4ccbd0372287" (UID: "2981e6fc-b8dc-45b4-a42d-4ccbd0372287"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.893494 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-config" (OuterVolumeSpecName: "config") pod "2981e6fc-b8dc-45b4-a42d-4ccbd0372287" (UID: "2981e6fc-b8dc-45b4-a42d-4ccbd0372287"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.898540 4854 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.898571 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.898584 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbsdx\" (UniqueName: \"kubernetes.io/projected/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-kube-api-access-zbsdx\") on node \"crc\" DevicePath \"\""
Nov 25 09:59:34 crc kubenswrapper[4854]: I1125 09:59:34.952171 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2981e6fc-b8dc-45b4-a42d-4ccbd0372287" (UID: "2981e6fc-b8dc-45b4-a42d-4ccbd0372287"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.000946 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.052096 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "2981e6fc-b8dc-45b4-a42d-4ccbd0372287" (UID: "2981e6fc-b8dc-45b4-a42d-4ccbd0372287"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.092723 4854 generic.go:334] "Generic (PLEG): container finished" podID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerID="5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465" exitCode=0
Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.093013 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-84d577c77b-wtjcw"
Need to start a new one" pod="openstack/neutron-84d577c77b-wtjcw" Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.103658 4854 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2981e6fc-b8dc-45b4-a42d-4ccbd0372287-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.113897 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" podStartSLOduration=4.113880521 podStartE2EDuration="4.113880521s" podCreationTimestamp="2025-11-25 09:59:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:35.105970832 +0000 UTC m=+1380.958964228" watchObservedRunningTime="2025-11-25 09:59:35.113880521 +0000 UTC m=+1380.966873907" Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.160571 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.160641 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" event={"ID":"9ff2c539-c2ac-4420-b11e-ba3c88af56be","Type":"ContainerStarted","Data":"4c86179bd6bab2075dddfa8a94732a16d2f9183fbcbd2217d914610f2ffa5a4d"} Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.160689 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84d577c77b-wtjcw" event={"ID":"2981e6fc-b8dc-45b4-a42d-4ccbd0372287","Type":"ContainerDied","Data":"5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465"} Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.160712 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84d577c77b-wtjcw" event={"ID":"2981e6fc-b8dc-45b4-a42d-4ccbd0372287","Type":"ContainerDied","Data":"b19043b8e6666132b5e5a19bd16964672de2f1bf7c24609ec24a3d558d32dc71"} Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.160734 4854 scope.go:117] "RemoveContainer" containerID="65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799" Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.260110 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-84d577c77b-wtjcw"] Nov 25 09:59:35 crc kubenswrapper[4854]: I1125 09:59:35.293075 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-84d577c77b-wtjcw"] Nov 25 09:59:36 crc kubenswrapper[4854]: I1125 09:59:36.107169 4854 generic.go:334] "Generic (PLEG): container finished" podID="f323116e-620c-40d7-8cc7-55315cd06335" containerID="a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2" exitCode=0 Nov 25 09:59:36 crc kubenswrapper[4854]: I1125 09:59:36.107375 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2hhj" event={"ID":"f323116e-620c-40d7-8cc7-55315cd06335","Type":"ContainerDied","Data":"a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2"} Nov 25 09:59:37 crc kubenswrapper[4854]: I1125 09:59:37.027712 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" path="/var/lib/kubelet/pods/2981e6fc-b8dc-45b4-a42d-4ccbd0372287/volumes" Nov 25 09:59:37 crc kubenswrapper[4854]: I1125 09:59:37.115833 4854 scope.go:117] "RemoveContainer" containerID="5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465" Nov 25 09:59:37 crc 
kubenswrapper[4854]: I1125 09:59:37.538406 4854 scope.go:117] "RemoveContainer" containerID="65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799" Nov 25 09:59:37 crc kubenswrapper[4854]: E1125 09:59:37.543160 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799\": container with ID starting with 65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799 not found: ID does not exist" containerID="65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799" Nov 25 09:59:37 crc kubenswrapper[4854]: I1125 09:59:37.543214 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799"} err="failed to get container status \"65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799\": rpc error: code = NotFound desc = could not find container \"65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799\": container with ID starting with 65334c978879c29700e5cd3a28133a620396a73bce8704b9373c86d368830799 not found: ID does not exist" Nov 25 09:59:37 crc kubenswrapper[4854]: I1125 09:59:37.543241 4854 scope.go:117] "RemoveContainer" containerID="5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465" Nov 25 09:59:37 crc kubenswrapper[4854]: E1125 09:59:37.545319 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465\": container with ID starting with 5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465 not found: ID does not exist" containerID="5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465" Nov 25 09:59:37 crc kubenswrapper[4854]: I1125 09:59:37.545349 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465"} err="failed to get container status \"5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465\": rpc error: code = NotFound desc = could not find container \"5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465\": container with ID starting with 5d12e19861ad5832516713658c0d455b1416fa18edab655544cde2f7eafc8465 not found: ID does not exist" Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.132489 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55dc74c94f-t88f4" event={"ID":"b8f14245-4267-4921-996d-6d192b4c9953","Type":"ContainerStarted","Data":"f72aa862edcc58565bf471a3e9b31f6609f9c1d12ab0f35ca3b943e40dcd4205"} Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.133901 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.135484 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" event={"ID":"964f5abd-0c6c-47cf-82ca-ea31aaf2b522","Type":"ContainerStarted","Data":"a4446b3026f09ed88d298a11c202587670e200fe68483c61cdeda6a4c22d923e"} Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.136057 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.140448 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerStarted","Data":"e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b"} Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.140695 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-central-agent" containerID="cri-o://be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d" gracePeriod=30 Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.140974 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.141028 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="proxy-httpd" containerID="cri-o://e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b" gracePeriod=30 Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.141082 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="sg-core" containerID="cri-o://a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb" gracePeriod=30 Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.141122 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-notification-agent" containerID="cri-o://dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32" gracePeriod=30 Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.156195 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2hhj" event={"ID":"f323116e-620c-40d7-8cc7-55315cd06335","Type":"ContainerStarted","Data":"fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073"} Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.161600 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-55dc74c94f-t88f4" podStartSLOduration=2.82751825 podStartE2EDuration="7.161581155s" podCreationTimestamp="2025-11-25 09:59:31 +0000 UTC" firstStartedPulling="2025-11-25 09:59:32.778776763 +0000 UTC m=+1378.631770139" lastFinishedPulling="2025-11-25 09:59:37.112839668 +0000 UTC m=+1382.965833044" observedRunningTime="2025-11-25 09:59:38.158552931 +0000 UTC m=+1384.011546327" watchObservedRunningTime="2025-11-25 09:59:38.161581155 +0000 UTC m=+1384.014574531" Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.162063 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgtsh" event={"ID":"3405ae80-37c0-433b-99aa-f9e233d61d86","Type":"ContainerStarted","Data":"1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d"} Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.197095 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" podStartSLOduration=2.728607422 podStartE2EDuration="7.197073106s" podCreationTimestamp="2025-11-25 09:59:31 +0000 UTC" firstStartedPulling="2025-11-25 09:59:32.669115129 +0000 UTC m=+1378.522108505" lastFinishedPulling="2025-11-25 09:59:37.137580813 +0000 UTC m=+1382.990574189" observedRunningTime="2025-11-25 09:59:38.182430641 +0000 UTC m=+1384.035424027" 
watchObservedRunningTime="2025-11-25 09:59:38.197073106 +0000 UTC m=+1384.050066482" Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.232852 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.715460539 podStartE2EDuration="17.232826826s" podCreationTimestamp="2025-11-25 09:59:21 +0000 UTC" firstStartedPulling="2025-11-25 09:59:22.852507263 +0000 UTC m=+1368.705500639" lastFinishedPulling="2025-11-25 09:59:37.36987355 +0000 UTC m=+1383.222866926" observedRunningTime="2025-11-25 09:59:38.212855893 +0000 UTC m=+1384.065849289" watchObservedRunningTime="2025-11-25 09:59:38.232826826 +0000 UTC m=+1384.085820202" Nov 25 09:59:38 crc kubenswrapper[4854]: I1125 09:59:38.253227 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p2hhj" podStartSLOduration=7.972992386 podStartE2EDuration="12.25320199s" podCreationTimestamp="2025-11-25 09:59:26 +0000 UTC" firstStartedPulling="2025-11-25 09:59:32.978163461 +0000 UTC m=+1378.831156837" lastFinishedPulling="2025-11-25 09:59:37.258373065 +0000 UTC m=+1383.111366441" observedRunningTime="2025-11-25 09:59:38.24671634 +0000 UTC m=+1384.099709726" watchObservedRunningTime="2025-11-25 09:59:38.25320199 +0000 UTC m=+1384.106195366" Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.177311 4854 generic.go:334] "Generic (PLEG): container finished" podID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerID="e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b" exitCode=0 Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.177368 4854 generic.go:334] "Generic (PLEG): container finished" podID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerID="a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb" exitCode=2 Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.177379 4854 generic.go:334] "Generic (PLEG): container finished" podID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerID="dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32" exitCode=0 Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.177397 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerDied","Data":"e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b"} Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.177501 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerDied","Data":"a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb"} Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.177519 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerDied","Data":"dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32"} Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.814143 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-f5c9d8946-qfdms"] Nov 25 09:59:39 crc kubenswrapper[4854]: E1125 09:59:39.814883 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-httpd" Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.814900 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-httpd" Nov 25 09:59:39 
Nov 25 09:59:39 crc kubenswrapper[4854]: E1125 09:59:39.814941 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-api"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.814950 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-api"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.815234 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-api"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.815266 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2981e6fc-b8dc-45b4-a42d-4ccbd0372287" containerName="neutron-httpd"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.816126 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.831072 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-f5c9d8946-qfdms"]
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.904430 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data-custom\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.904560 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh5cf\" (UniqueName: \"kubernetes.io/projected/15d8094a-7d5b-4f52-8ef0-388820ead440-kube-api-access-jh5cf\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.904663 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-combined-ca-bundle\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:39 crc kubenswrapper[4854]: I1125 09:59:39.904713 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.006788 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5469c8bfd4-mmvtn"]
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.009222 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.012511 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data-custom\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.012696 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh5cf\" (UniqueName: \"kubernetes.io/projected/15d8094a-7d5b-4f52-8ef0-388820ead440-kube-api-access-jh5cf\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.012820 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-combined-ca-bundle\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.012886 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.046165 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.060120 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-combined-ca-bundle\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.076105 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data-custom\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.090006 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh5cf\" (UniqueName: \"kubernetes.io/projected/15d8094a-7d5b-4f52-8ef0-388820ead440-kube-api-access-jh5cf\") pod \"heat-engine-f5c9d8946-qfdms\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " pod="openstack/heat-engine-f5c9d8946-qfdms"
Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.122498 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-combined-ca-bundle\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn"
pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.122576 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn8rg\" (UniqueName: \"kubernetes.io/projected/db0965e6-0374-4b14-875e-8557c346815e-kube-api-access-fn8rg\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.122625 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data-custom\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.122639 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.129759 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-d874db4c8-mzdgt"] Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.131880 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.138134 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-f5c9d8946-qfdms" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.158739 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5469c8bfd4-mmvtn"] Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.196895 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-d874db4c8-mzdgt"] Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224151 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkx4k\" (UniqueName: \"kubernetes.io/projected/2fc43285-4e2b-4d05-b52b-b446b200723e-kube-api-access-qkx4k\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224207 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-combined-ca-bundle\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224237 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data-custom\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224253 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224295 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224459 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data-custom\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224488 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-combined-ca-bundle\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.224544 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn8rg\" (UniqueName: \"kubernetes.io/projected/db0965e6-0374-4b14-875e-8557c346815e-kube-api-access-fn8rg\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.241375 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-combined-ca-bundle\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.252950 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.256625 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data-custom\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.276748 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn8rg\" (UniqueName: \"kubernetes.io/projected/db0965e6-0374-4b14-875e-8557c346815e-kube-api-access-fn8rg\") pod \"heat-cfnapi-5469c8bfd4-mmvtn\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.326183 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data-custom\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.326301 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkx4k\" (UniqueName: \"kubernetes.io/projected/2fc43285-4e2b-4d05-b52b-b446b200723e-kube-api-access-qkx4k\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.326340 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-combined-ca-bundle\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.326398 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.336352 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.341508 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data-custom\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.345689 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-combined-ca-bundle\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.376245 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.382971 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkx4k\" (UniqueName: \"kubernetes.io/projected/2fc43285-4e2b-4d05-b52b-b446b200723e-kube-api-access-qkx4k\") pod \"heat-api-d874db4c8-mzdgt\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.386851 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:40 crc kubenswrapper[4854]: I1125 09:59:40.865429 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-f5c9d8946-qfdms"] Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.007907 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5469c8bfd4-mmvtn"] Nov 25 09:59:41 crc kubenswrapper[4854]: W1125 09:59:41.011837 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb0965e6_0374_4b14_875e_8557c346815e.slice/crio-5d11d06b24dd4efdae5bd14ec30aa76bfb9485473af1462fd585e606e35fd6c6 WatchSource:0}: Error finding container 5d11d06b24dd4efdae5bd14ec30aa76bfb9485473af1462fd585e606e35fd6c6: Status 404 returned error can't find the container with id 5d11d06b24dd4efdae5bd14ec30aa76bfb9485473af1462fd585e606e35fd6c6 Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.136230 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-d874db4c8-mzdgt"] Nov 25 09:59:41 crc kubenswrapper[4854]: W1125 09:59:41.141790 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fc43285_4e2b_4d05_b52b_b446b200723e.slice/crio-b522c9910772b6a8384977365882529f9115bb7a4d5911765cda11f2058011c4 WatchSource:0}: Error finding container b522c9910772b6a8384977365882529f9115bb7a4d5911765cda11f2058011c4: Status 404 returned error can't find the container with id b522c9910772b6a8384977365882529f9115bb7a4d5911765cda11f2058011c4 Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.232281 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-d874db4c8-mzdgt" event={"ID":"2fc43285-4e2b-4d05-b52b-b446b200723e","Type":"ContainerStarted","Data":"b522c9910772b6a8384977365882529f9115bb7a4d5911765cda11f2058011c4"} Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.233306 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" event={"ID":"db0965e6-0374-4b14-875e-8557c346815e","Type":"ContainerStarted","Data":"5d11d06b24dd4efdae5bd14ec30aa76bfb9485473af1462fd585e606e35fd6c6"} Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.238825 4854 generic.go:334] "Generic (PLEG): container finished" podID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerID="1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d" exitCode=0 Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.238982 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgtsh" event={"ID":"3405ae80-37c0-433b-99aa-f9e233d61d86","Type":"ContainerDied","Data":"1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d"} Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.241207 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-f5c9d8946-qfdms" event={"ID":"15d8094a-7d5b-4f52-8ef0-388820ead440","Type":"ContainerStarted","Data":"73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9"} Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.241248 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-f5c9d8946-qfdms" event={"ID":"15d8094a-7d5b-4f52-8ef0-388820ead440","Type":"ContainerStarted","Data":"7e80a8144609011e5483943f3f6f2c54b90ef0aa8c5a8ec48fee6e8060247b7d"} Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.242210 4854 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-f5c9d8946-qfdms" Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.289445 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-f5c9d8946-qfdms" podStartSLOduration=2.289428376 podStartE2EDuration="2.289428376s" podCreationTimestamp="2025-11-25 09:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:41.272558619 +0000 UTC m=+1387.125551995" watchObservedRunningTime="2025-11-25 09:59:41.289428376 +0000 UTC m=+1387.142421752" Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.657805 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.761236 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-wrpfb"] Nov 25 09:59:41 crc kubenswrapper[4854]: I1125 09:59:41.761883 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerName="dnsmasq-dns" containerID="cri-o://be34493ffb6896f322a2bab960edbfc290b1fe6331eda0bcf305befe492a80b3" gracePeriod=10 Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.292213 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgtsh" event={"ID":"3405ae80-37c0-433b-99aa-f9e233d61d86","Type":"ContainerStarted","Data":"25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af"} Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.315033 4854 generic.go:334] "Generic (PLEG): container finished" podID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerID="be34493ffb6896f322a2bab960edbfc290b1fe6331eda0bcf305befe492a80b3" exitCode=0 Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.315108 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" event={"ID":"f7c0062e-6699-45fa-a1fb-a9efb44a80e5","Type":"ContainerDied","Data":"be34493ffb6896f322a2bab960edbfc290b1fe6331eda0bcf305befe492a80b3"} Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.317488 4854 generic.go:334] "Generic (PLEG): container finished" podID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerID="ba90533bff9e52047da741b984816523718015a68da7fd4eabff51776ca073ce" exitCode=1 Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.317625 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-d874db4c8-mzdgt" event={"ID":"2fc43285-4e2b-4d05-b52b-b446b200723e","Type":"ContainerDied","Data":"ba90533bff9e52047da741b984816523718015a68da7fd4eabff51776ca073ce"} Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.318970 4854 scope.go:117] "RemoveContainer" containerID="ba90533bff9e52047da741b984816523718015a68da7fd4eabff51776ca073ce" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.320146 4854 scope.go:117] "RemoveContainer" containerID="c1c8da13f8854beb58749ed85ef218122c95a66aeb0ffb17bfab3f26f686eafc" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.324187 4854 generic.go:334] "Generic (PLEG): container finished" podID="db0965e6-0374-4b14-875e-8557c346815e" containerID="c1c8da13f8854beb58749ed85ef218122c95a66aeb0ffb17bfab3f26f686eafc" exitCode=1 Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.325523 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" event={"ID":"db0965e6-0374-4b14-875e-8557c346815e","Type":"ContainerDied","Data":"c1c8da13f8854beb58749ed85ef218122c95a66aeb0ffb17bfab3f26f686eafc"} Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.360504 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xgtsh" podStartSLOduration=2.687483984 podStartE2EDuration="10.36047986s" podCreationTimestamp="2025-11-25 09:59:32 +0000 UTC" firstStartedPulling="2025-11-25 09:59:34.045974925 +0000 UTC m=+1379.898968301" lastFinishedPulling="2025-11-25 09:59:41.718970791 +0000 UTC m=+1387.571964177" observedRunningTime="2025-11-25 09:59:42.328835564 +0000 UTC m=+1388.181828950" watchObservedRunningTime="2025-11-25 09:59:42.36047986 +0000 UTC m=+1388.213473236" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.531394 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.531853 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xgtsh" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.619340 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.701640 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-svc\") pod \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.701751 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-swift-storage-0\") pod \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.701899 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-config\") pod \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.701933 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-sb\") pod \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.702570 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4h4t\" (UniqueName: \"kubernetes.io/projected/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-kube-api-access-d4h4t\") pod \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.702759 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-nb\") pod \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\" (UID: \"f7c0062e-6699-45fa-a1fb-a9efb44a80e5\") " Nov 25 09:59:42 crc 
kubenswrapper[4854]: I1125 09:59:42.741853 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-kube-api-access-d4h4t" (OuterVolumeSpecName: "kube-api-access-d4h4t") pod "f7c0062e-6699-45fa-a1fb-a9efb44a80e5" (UID: "f7c0062e-6699-45fa-a1fb-a9efb44a80e5"). InnerVolumeSpecName "kube-api-access-d4h4t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.772170 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f7c0062e-6699-45fa-a1fb-a9efb44a80e5" (UID: "f7c0062e-6699-45fa-a1fb-a9efb44a80e5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.780119 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f7c0062e-6699-45fa-a1fb-a9efb44a80e5" (UID: "f7c0062e-6699-45fa-a1fb-a9efb44a80e5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.811293 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c7l4t" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" probeResult="failure" output=< Nov 25 09:59:42 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:59:42 crc kubenswrapper[4854]: > Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.812076 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.812101 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.812118 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4h4t\" (UniqueName: \"kubernetes.io/projected/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-kube-api-access-d4h4t\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.815147 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f7c0062e-6699-45fa-a1fb-a9efb44a80e5" (UID: "f7c0062e-6699-45fa-a1fb-a9efb44a80e5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.854002 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-config" (OuterVolumeSpecName: "config") pod "f7c0062e-6699-45fa-a1fb-a9efb44a80e5" (UID: "f7c0062e-6699-45fa-a1fb-a9efb44a80e5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.859633 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f7c0062e-6699-45fa-a1fb-a9efb44a80e5" (UID: "f7c0062e-6699-45fa-a1fb-a9efb44a80e5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.914632 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.914678 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:42 crc kubenswrapper[4854]: I1125 09:59:42.914689 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f7c0062e-6699-45fa-a1fb-a9efb44a80e5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.343786 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" event={"ID":"f7c0062e-6699-45fa-a1fb-a9efb44a80e5","Type":"ContainerDied","Data":"0a19ddae4c512a87d58e6cd6716c6896f5111471f666bd844f28bf3840fc37ab"} Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.344988 4854 scope.go:117] "RemoveContainer" containerID="be34493ffb6896f322a2bab960edbfc290b1fe6331eda0bcf305befe492a80b3" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.345298 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.355468 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-d874db4c8-mzdgt" event={"ID":"2fc43285-4e2b-4d05-b52b-b446b200723e","Type":"ContainerStarted","Data":"53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075"} Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.356585 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.372404 4854 generic.go:334] "Generic (PLEG): container finished" podID="db0965e6-0374-4b14-875e-8557c346815e" containerID="b71373c5ed36e6f9cd07aefe202391580817607457b50933b56db407c7162435" exitCode=1 Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.372768 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" event={"ID":"db0965e6-0374-4b14-875e-8557c346815e","Type":"ContainerDied","Data":"b71373c5ed36e6f9cd07aefe202391580817607457b50933b56db407c7162435"} Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.374062 4854 scope.go:117] "RemoveContainer" containerID="b71373c5ed36e6f9cd07aefe202391580817607457b50933b56db407c7162435" Nov 25 09:59:43 crc kubenswrapper[4854]: E1125 09:59:43.374322 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5469c8bfd4-mmvtn_openstack(db0965e6-0374-4b14-875e-8557c346815e)\"" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" podUID="db0965e6-0374-4b14-875e-8557c346815e" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.382192 4854 scope.go:117] "RemoveContainer" containerID="eef673bc8954710d47f2671f29b11efa2f1d67980fc641ff528d145d0e5bd48b" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.394266 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-wrpfb"] Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.417537 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-wrpfb"] Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.422167 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-d874db4c8-mzdgt" podStartSLOduration=4.422141574 podStartE2EDuration="4.422141574s" podCreationTimestamp="2025-11-25 09:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:43.387745752 +0000 UTC m=+1389.240739128" watchObservedRunningTime="2025-11-25 09:59:43.422141574 +0000 UTC m=+1389.275134950" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.428603 4854 scope.go:117] "RemoveContainer" containerID="c1c8da13f8854beb58749ed85ef218122c95a66aeb0ffb17bfab3f26f686eafc" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.592636 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-xgtsh" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="registry-server" probeResult="failure" output=< Nov 25 09:59:43 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:59:43 crc kubenswrapper[4854]: > Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.944645 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/heat-api-55dc74c94f-t88f4"] Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.947260 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-55dc74c94f-t88f4" podUID="b8f14245-4267-4921-996d-6d192b4c9953" containerName="heat-api" containerID="cri-o://f72aa862edcc58565bf471a3e9b31f6609f9c1d12ab0f35ca3b943e40dcd4205" gracePeriod=60 Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.964708 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-55dc74c94f-t88f4" podUID="b8f14245-4267-4921-996d-6d192b4c9953" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.216:8004/healthcheck\": EOF" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.968041 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6d5696cb69-gnxts"] Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.968259 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" containerName="heat-cfnapi" containerID="cri-o://a4446b3026f09ed88d298a11c202587670e200fe68483c61cdeda6a4c22d923e" gracePeriod=60 Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.994665 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7cf6b8b6cd-csh8n"] Nov 25 09:59:43 crc kubenswrapper[4854]: E1125 09:59:43.995319 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerName="init" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.995346 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerName="init" Nov 25 09:59:43 crc kubenswrapper[4854]: E1125 09:59:43.995411 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerName="dnsmasq-dns" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.995421 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerName="dnsmasq-dns" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.995765 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerName="dnsmasq-dns" Nov 25 09:59:43 crc kubenswrapper[4854]: I1125 09:59:43.996833 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.002171 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.002457 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.036213 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7cf6b8b6cd-csh8n"] Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.038824 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.215:8000/healthcheck\": EOF" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.135805 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-756b755c6-wqm2k"] Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.141157 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.147026 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.147396 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.149567 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74v5b\" (UniqueName: \"kubernetes.io/projected/57723d21-6e34-4a5b-8063-9d5b97022cfc-kube-api-access-74v5b\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.149794 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data-custom\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.149833 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.149927 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-combined-ca-bundle\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.149961 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-internal-tls-certs\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: 
\"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.150013 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-public-tls-certs\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.177614 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-756b755c6-wqm2k"] Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251613 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data-custom\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251690 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251718 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data-custom\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251746 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k87cl\" (UniqueName: \"kubernetes.io/projected/aeb8dcf6-3640-4930-8663-be372820d69c-kube-api-access-k87cl\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251781 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-internal-tls-certs\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251818 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-combined-ca-bundle\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251863 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251884 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-combined-ca-bundle\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251944 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-internal-tls-certs\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.251995 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-public-tls-certs\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.252246 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74v5b\" (UniqueName: \"kubernetes.io/projected/57723d21-6e34-4a5b-8063-9d5b97022cfc-kube-api-access-74v5b\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.253061 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-public-tls-certs\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.257434 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-internal-tls-certs\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.258700 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data-custom\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.259598 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.261325 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-public-tls-certs\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.274346 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-combined-ca-bundle\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.277409 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74v5b\" (UniqueName: \"kubernetes.io/projected/57723d21-6e34-4a5b-8063-9d5b97022cfc-kube-api-access-74v5b\") pod \"heat-api-7cf6b8b6cd-csh8n\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.327947 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.355328 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-public-tls-certs\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.355708 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data-custom\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.355737 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k87cl\" (UniqueName: \"kubernetes.io/projected/aeb8dcf6-3640-4930-8663-be372820d69c-kube-api-access-k87cl\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.355769 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-internal-tls-certs\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.355806 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-combined-ca-bundle\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.355825 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.359890 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data-custom\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 
09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.363720 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.364387 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-internal-tls-certs\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.367096 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-combined-ca-bundle\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.368411 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-public-tls-certs\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.373207 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k87cl\" (UniqueName: \"kubernetes.io/projected/aeb8dcf6-3640-4930-8663-be372820d69c-kube-api-access-k87cl\") pod \"heat-cfnapi-756b755c6-wqm2k\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.405046 4854 generic.go:334] "Generic (PLEG): container finished" podID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerID="53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075" exitCode=1 Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.405135 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-d874db4c8-mzdgt" event={"ID":"2fc43285-4e2b-4d05-b52b-b446b200723e","Type":"ContainerDied","Data":"53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075"} Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.405176 4854 scope.go:117] "RemoveContainer" containerID="ba90533bff9e52047da741b984816523718015a68da7fd4eabff51776ca073ce" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.405948 4854 scope.go:117] "RemoveContainer" containerID="53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075" Nov 25 09:59:44 crc kubenswrapper[4854]: E1125 09:59:44.406210 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-d874db4c8-mzdgt_openstack(2fc43285-4e2b-4d05-b52b-b446b200723e)\"" pod="openstack/heat-api-d874db4c8-mzdgt" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.439036 4854 scope.go:117] "RemoveContainer" containerID="b71373c5ed36e6f9cd07aefe202391580817607457b50933b56db407c7162435" Nov 25 09:59:44 crc kubenswrapper[4854]: E1125 09:59:44.439799 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5469c8bfd4-mmvtn_openstack(db0965e6-0374-4b14-875e-8557c346815e)\"" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" podUID="db0965e6-0374-4b14-875e-8557c346815e" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.471265 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:44 crc kubenswrapper[4854]: I1125 09:59:44.997495 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7cf6b8b6cd-csh8n"] Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.035478 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" path="/var/lib/kubelet/pods/f7c0062e-6699-45fa-a1fb-a9efb44a80e5/volumes" Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.190474 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-756b755c6-wqm2k"] Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.377794 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.377856 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.388163 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.474057 4854 scope.go:117] "RemoveContainer" containerID="53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075" Nov 25 09:59:45 crc kubenswrapper[4854]: E1125 09:59:45.474390 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-d874db4c8-mzdgt_openstack(2fc43285-4e2b-4d05-b52b-b446b200723e)\"" pod="openstack/heat-api-d874db4c8-mzdgt" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.475738 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-756b755c6-wqm2k" event={"ID":"aeb8dcf6-3640-4930-8663-be372820d69c","Type":"ContainerStarted","Data":"77e544bca9d533d4f5127bd91c0be1c0a1e8faa48fc0f8dbe455efa37408624f"} Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.478835 4854 scope.go:117] "RemoveContainer" containerID="b71373c5ed36e6f9cd07aefe202391580817607457b50933b56db407c7162435" Nov 25 09:59:45 crc kubenswrapper[4854]: E1125 09:59:45.479119 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5469c8bfd4-mmvtn_openstack(db0965e6-0374-4b14-875e-8557c346815e)\"" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" podUID="db0965e6-0374-4b14-875e-8557c346815e" Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.479425 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7cf6b8b6cd-csh8n" event={"ID":"57723d21-6e34-4a5b-8063-9d5b97022cfc","Type":"ContainerStarted","Data":"01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f"} Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.479483 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-api-7cf6b8b6cd-csh8n" event={"ID":"57723d21-6e34-4a5b-8063-9d5b97022cfc","Type":"ContainerStarted","Data":"e1d930f79b3bc94c3a9a9bef5c8dc4358d8c98cd7cb9aab0b22f84f31c1131fa"} Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.479586 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:45 crc kubenswrapper[4854]: I1125 09:59:45.550035 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7cf6b8b6cd-csh8n" podStartSLOduration=2.550009428 podStartE2EDuration="2.550009428s" podCreationTimestamp="2025-11-25 09:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:45.539086735 +0000 UTC m=+1391.392080131" watchObservedRunningTime="2025-11-25 09:59:45.550009428 +0000 UTC m=+1391.403002804" Nov 25 09:59:46 crc kubenswrapper[4854]: I1125 09:59:46.523790 4854 scope.go:117] "RemoveContainer" containerID="53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075" Nov 25 09:59:46 crc kubenswrapper[4854]: E1125 09:59:46.525098 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-d874db4c8-mzdgt_openstack(2fc43285-4e2b-4d05-b52b-b446b200723e)\"" pod="openstack/heat-api-d874db4c8-mzdgt" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" Nov 25 09:59:46 crc kubenswrapper[4854]: I1125 09:59:46.525568 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-756b755c6-wqm2k" event={"ID":"aeb8dcf6-3640-4930-8663-be372820d69c","Type":"ContainerStarted","Data":"7c876d7b8becacb2d7cf0f7a7a2fa2e375c90003ca3092d163dae8f1456e2edc"} Nov 25 09:59:46 crc kubenswrapper[4854]: I1125 09:59:46.536535 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:46 crc kubenswrapper[4854]: I1125 09:59:46.556689 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-756b755c6-wqm2k" podStartSLOduration=3.55664244 podStartE2EDuration="3.55664244s" podCreationTimestamp="2025-11-25 09:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:46.551181009 +0000 UTC m=+1392.404174385" watchObservedRunningTime="2025-11-25 09:59:46.55664244 +0000 UTC m=+1392.409635816" Nov 25 09:59:46 crc kubenswrapper[4854]: I1125 09:59:46.997309 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:46 crc kubenswrapper[4854]: I1125 09:59:46.997573 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.067046 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.201498 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6578955fd5-wrpfb" podUID="f7c0062e-6699-45fa-a1fb-a9efb44a80e5" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.204:5353: i/o timeout" Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.317210 4854 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.317561 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-httpd" containerID="cri-o://ce845be113d7c4914879cee65d8279f1fe6e5641babefbbdaa7470c88928078b" gracePeriod=30 Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.317905 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-log" containerID="cri-o://828a51a9b39919a71b5ae3a8bf1007751e179b303d3e3675ecd94d37882a7715" gracePeriod=30 Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.533480 4854 generic.go:334] "Generic (PLEG): container finished" podID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerID="828a51a9b39919a71b5ae3a8bf1007751e179b303d3e3675ecd94d37882a7715" exitCode=143 Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.533572 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c67dbcea-a3b9-46ac-833c-97595c61756e","Type":"ContainerDied","Data":"828a51a9b39919a71b5ae3a8bf1007751e179b303d3e3675ecd94d37882a7715"} Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.597463 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:47 crc kubenswrapper[4854]: I1125 09:59:47.816842 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2hhj"] Nov 25 09:59:48 crc kubenswrapper[4854]: I1125 09:59:48.835479 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:59:48 crc kubenswrapper[4854]: I1125 09:59:48.836070 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-log" containerID="cri-o://f1a6cbcb770c9dcf08497157b59b07b98a4ebfc9e1e40169544fc1aaa7aefeb3" gracePeriod=30 Nov 25 09:59:48 crc kubenswrapper[4854]: I1125 09:59:48.836692 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-httpd" containerID="cri-o://3925d0ecbcf492ecf430f31674a2f8cf6d1429050e7fd4aff7d6dae030077326" gracePeriod=30 Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.519542 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.580424 4854 generic.go:334] "Generic (PLEG): container finished" podID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerID="be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d" exitCode=0 Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.580484 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerDied","Data":"be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d"} Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.580513 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b0f90516-2fdf-4c1a-86e2-ea4626d8329f","Type":"ContainerDied","Data":"5ff6f0ebae2cf874d7dd207c637bce4046557c4ff1ded83cf0f371894cf6aaa3"} Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.580529 4854 scope.go:117] "RemoveContainer" containerID="e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.580617 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.606695 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-scripts\") pod \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.606804 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-sg-core-conf-yaml\") pod \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.606921 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-combined-ca-bundle\") pod \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.606984 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hr2pc\" (UniqueName: \"kubernetes.io/projected/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-kube-api-access-hr2pc\") pod \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.607072 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-run-httpd\") pod \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.607102 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-log-httpd\") pod \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.607156 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-config-data\") pod \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\" (UID: \"b0f90516-2fdf-4c1a-86e2-ea4626d8329f\") " Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.610829 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b0f90516-2fdf-4c1a-86e2-ea4626d8329f" (UID: "b0f90516-2fdf-4c1a-86e2-ea4626d8329f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.612031 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b0f90516-2fdf-4c1a-86e2-ea4626d8329f" (UID: "b0f90516-2fdf-4c1a-86e2-ea4626d8329f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.616074 4854 generic.go:334] "Generic (PLEG): container finished" podID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerID="f1a6cbcb770c9dcf08497157b59b07b98a4ebfc9e1e40169544fc1aaa7aefeb3" exitCode=143 Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.616367 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p2hhj" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="registry-server" containerID="cri-o://fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073" gracePeriod=2 Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.616939 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9291c61a-5095-4ccb-a6a0-e1e618bfb501","Type":"ContainerDied","Data":"f1a6cbcb770c9dcf08497157b59b07b98a4ebfc9e1e40169544fc1aaa7aefeb3"} Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.635472 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-kube-api-access-hr2pc" (OuterVolumeSpecName: "kube-api-access-hr2pc") pod "b0f90516-2fdf-4c1a-86e2-ea4626d8329f" (UID: "b0f90516-2fdf-4c1a-86e2-ea4626d8329f"). InnerVolumeSpecName "kube-api-access-hr2pc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.651082 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-scripts" (OuterVolumeSpecName: "scripts") pod "b0f90516-2fdf-4c1a-86e2-ea4626d8329f" (UID: "b0f90516-2fdf-4c1a-86e2-ea4626d8329f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.654764 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-6hbdw"] Nov 25 09:59:49 crc kubenswrapper[4854]: E1125 09:59:49.657440 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="sg-core" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.657474 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="sg-core" Nov 25 09:59:49 crc kubenswrapper[4854]: E1125 09:59:49.657531 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="proxy-httpd" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.657538 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="proxy-httpd" Nov 25 09:59:49 crc kubenswrapper[4854]: E1125 09:59:49.657575 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-central-agent" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.657581 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-central-agent" Nov 25 09:59:49 crc kubenswrapper[4854]: E1125 09:59:49.657606 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-notification-agent" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.657614 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-notification-agent" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.658320 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="sg-core" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.658346 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-central-agent" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.658357 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="ceilometer-notification-agent" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.658375 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" containerName="proxy-httpd" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.660150 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.715611 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hr2pc\" (UniqueName: \"kubernetes.io/projected/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-kube-api-access-hr2pc\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.715645 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.715685 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.715693 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.733633 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-6hbdw"] Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.791018 4854 scope.go:117] "RemoveContainer" containerID="a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.822205 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37479b5f-7d9e-4202-8807-6442cf079a33-operator-scripts\") pod \"nova-api-db-create-6hbdw\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.822340 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tsdl\" (UniqueName: \"kubernetes.io/projected/37479b5f-7d9e-4202-8807-6442cf079a33-kube-api-access-2tsdl\") pod \"nova-api-db-create-6hbdw\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.842707 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-dbac-account-create-kk5zc"] Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.844426 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.846839 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.858834 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b0f90516-2fdf-4c1a-86e2-ea4626d8329f" (UID: "b0f90516-2fdf-4c1a-86e2-ea4626d8329f"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.869069 4854 scope.go:117] "RemoveContainer" containerID="dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.895842 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-dbac-account-create-kk5zc"] Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.906441 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-swlpj"] Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.907987 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.924068 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37479b5f-7d9e-4202-8807-6442cf079a33-operator-scripts\") pod \"nova-api-db-create-6hbdw\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.924136 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-swlpj"] Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.924189 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tsdl\" (UniqueName: \"kubernetes.io/projected/37479b5f-7d9e-4202-8807-6442cf079a33-kube-api-access-2tsdl\") pod \"nova-api-db-create-6hbdw\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.924241 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl95s\" (UniqueName: \"kubernetes.io/projected/c6d04f91-08d5-484d-98a1-a1b1fc315df4-kube-api-access-pl95s\") pod \"nova-api-dbac-account-create-kk5zc\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.924346 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6d04f91-08d5-484d-98a1-a1b1fc315df4-operator-scripts\") pod \"nova-api-dbac-account-create-kk5zc\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.924445 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.926520 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37479b5f-7d9e-4202-8807-6442cf079a33-operator-scripts\") pod \"nova-api-db-create-6hbdw\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.928022 4854 scope.go:117] "RemoveContainer" containerID="be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.928044 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b0f90516-2fdf-4c1a-86e2-ea4626d8329f" (UID: "b0f90516-2fdf-4c1a-86e2-ea4626d8329f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.952031 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-config-data" (OuterVolumeSpecName: "config-data") pod "b0f90516-2fdf-4c1a-86e2-ea4626d8329f" (UID: "b0f90516-2fdf-4c1a-86e2-ea4626d8329f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.967354 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tsdl\" (UniqueName: \"kubernetes.io/projected/37479b5f-7d9e-4202-8807-6442cf079a33-kube-api-access-2tsdl\") pod \"nova-api-db-create-6hbdw\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.993174 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-daab-account-create-5pd4d"] Nov 25 09:59:49 crc kubenswrapper[4854]: I1125 09:59:49.999273 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.003935 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.019055 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-8rxxf"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.027361 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6d04f91-08d5-484d-98a1-a1b1fc315df4-operator-scripts\") pod \"nova-api-dbac-account-create-kk5zc\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.027583 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl95s\" (UniqueName: \"kubernetes.io/projected/c6d04f91-08d5-484d-98a1-a1b1fc315df4-kube-api-access-pl95s\") pod \"nova-api-dbac-account-create-kk5zc\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.027664 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/affbd45c-173c-492f-b047-3a5db0988607-operator-scripts\") pod \"nova-cell0-db-create-swlpj\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.027742 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tkq5\" (UniqueName: \"kubernetes.io/projected/affbd45c-173c-492f-b047-3a5db0988607-kube-api-access-7tkq5\") pod \"nova-cell0-db-create-swlpj\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.027824 4854 reconciler_common.go:293] "Volume 
detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.027841 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0f90516-2fdf-4c1a-86e2-ea4626d8329f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.028734 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6d04f91-08d5-484d-98a1-a1b1fc315df4-operator-scripts\") pod \"nova-api-dbac-account-create-kk5zc\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.037045 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.049329 4854 scope.go:117] "RemoveContainer" containerID="e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.050316 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl95s\" (UniqueName: \"kubernetes.io/projected/c6d04f91-08d5-484d-98a1-a1b1fc315df4-kube-api-access-pl95s\") pod \"nova-api-dbac-account-create-kk5zc\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:50 crc kubenswrapper[4854]: E1125 09:59:50.056838 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b\": container with ID starting with e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b not found: ID does not exist" containerID="e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.056881 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b"} err="failed to get container status \"e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b\": rpc error: code = NotFound desc = could not find container \"e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b\": container with ID starting with e17d03b7d53741134effaebc31ff1f6db7a82d3e6cb72ca42534da755824ef3b not found: ID does not exist" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.056910 4854 scope.go:117] "RemoveContainer" containerID="a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb" Nov 25 09:59:50 crc kubenswrapper[4854]: E1125 09:59:50.060117 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb\": container with ID starting with a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb not found: ID does not exist" containerID="a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.060166 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb"} err="failed to get 
container status \"a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb\": rpc error: code = NotFound desc = could not find container \"a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb\": container with ID starting with a16594f1f0d17c104622cab8dca5a88b91d0ddf48062c3fac4fb2dea4056c9cb not found: ID does not exist" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.060192 4854 scope.go:117] "RemoveContainer" containerID="dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32" Nov 25 09:59:50 crc kubenswrapper[4854]: E1125 09:59:50.064683 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32\": container with ID starting with dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32 not found: ID does not exist" containerID="dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.064723 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32"} err="failed to get container status \"dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32\": rpc error: code = NotFound desc = could not find container \"dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32\": container with ID starting with dcf0dcf535c06a609c0a2da5894b4cfc5ea413984742ec20806adf0b4170ad32 not found: ID does not exist" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.064748 4854 scope.go:117] "RemoveContainer" containerID="be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d" Nov 25 09:59:50 crc kubenswrapper[4854]: E1125 09:59:50.065241 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d\": container with ID starting with be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d not found: ID does not exist" containerID="be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.065281 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d"} err="failed to get container status \"be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d\": rpc error: code = NotFound desc = could not find container \"be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d\": container with ID starting with be204f745c00da1daba9f9591c35691d193c6dbc9bb5a5fe0debeb822639879d not found: ID does not exist" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.077791 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-daab-account-create-5pd4d"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.111826 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8rxxf"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.129411 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2822fcff-eadb-4d68-9297-2940d4573bc7-operator-scripts\") pod \"nova-cell1-db-create-8rxxf\" (UID: \"2822fcff-eadb-4d68-9297-2940d4573bc7\") " 
pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.129484 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bfg6\" (UniqueName: \"kubernetes.io/projected/2822fcff-eadb-4d68-9297-2940d4573bc7-kube-api-access-9bfg6\") pod \"nova-cell1-db-create-8rxxf\" (UID: \"2822fcff-eadb-4d68-9297-2940d4573bc7\") " pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.129572 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-operator-scripts\") pod \"nova-cell0-daab-account-create-5pd4d\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.129625 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/affbd45c-173c-492f-b047-3a5db0988607-operator-scripts\") pod \"nova-cell0-db-create-swlpj\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.129687 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tkq5\" (UniqueName: \"kubernetes.io/projected/affbd45c-173c-492f-b047-3a5db0988607-kube-api-access-7tkq5\") pod \"nova-cell0-db-create-swlpj\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.129813 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnmvv\" (UniqueName: \"kubernetes.io/projected/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-kube-api-access-gnmvv\") pod \"nova-cell0-daab-account-create-5pd4d\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.131508 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/affbd45c-173c-492f-b047-3a5db0988607-operator-scripts\") pod \"nova-cell0-db-create-swlpj\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.133095 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.158310 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tkq5\" (UniqueName: \"kubernetes.io/projected/affbd45c-173c-492f-b047-3a5db0988607-kube-api-access-7tkq5\") pod \"nova-cell0-db-create-swlpj\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.203938 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-7fef-account-create-np5sq"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.206389 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.214045 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.226774 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7fef-account-create-np5sq"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.235650 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.237514 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnmvv\" (UniqueName: \"kubernetes.io/projected/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-kube-api-access-gnmvv\") pod \"nova-cell0-daab-account-create-5pd4d\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.237740 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2822fcff-eadb-4d68-9297-2940d4573bc7-operator-scripts\") pod \"nova-cell1-db-create-8rxxf\" (UID: \"2822fcff-eadb-4d68-9297-2940d4573bc7\") " pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.237836 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bfg6\" (UniqueName: \"kubernetes.io/projected/2822fcff-eadb-4d68-9297-2940d4573bc7-kube-api-access-9bfg6\") pod \"nova-cell1-db-create-8rxxf\" (UID: \"2822fcff-eadb-4d68-9297-2940d4573bc7\") " pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.237969 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-operator-scripts\") pod \"nova-cell0-daab-account-create-5pd4d\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.248495 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2822fcff-eadb-4d68-9297-2940d4573bc7-operator-scripts\") pod \"nova-cell1-db-create-8rxxf\" (UID: \"2822fcff-eadb-4d68-9297-2940d4573bc7\") " pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.249374 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.253651 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-operator-scripts\") pod \"nova-cell0-daab-account-create-5pd4d\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.276147 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnmvv\" (UniqueName: \"kubernetes.io/projected/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-kube-api-access-gnmvv\") pod \"nova-cell0-daab-account-create-5pd4d\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.280313 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bfg6\" (UniqueName: \"kubernetes.io/projected/2822fcff-eadb-4d68-9297-2940d4573bc7-kube-api-access-9bfg6\") pod \"nova-cell1-db-create-8rxxf\" (UID: \"2822fcff-eadb-4d68-9297-2940d4573bc7\") " pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.339823 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tmxf\" (UniqueName: \"kubernetes.io/projected/01b1d826-91f3-4136-b30a-f48b2e6934a9-kube-api-access-5tmxf\") pod \"nova-cell1-7fef-account-create-np5sq\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.340201 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01b1d826-91f3-4136-b30a-f48b2e6934a9-operator-scripts\") pod \"nova-cell1-7fef-account-create-np5sq\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.367075 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.399997 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.400442 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-55dc74c94f-t88f4" podUID="b8f14245-4267-4921-996d-6d192b4c9953" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.216:8004/healthcheck\": read tcp 10.217.0.2:48104->10.217.0.216:8004: read: connection reset by peer" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.408260 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.444473 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tmxf\" (UniqueName: \"kubernetes.io/projected/01b1d826-91f3-4136-b30a-f48b2e6934a9-kube-api-access-5tmxf\") pod \"nova-cell1-7fef-account-create-np5sq\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.444718 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01b1d826-91f3-4136-b30a-f48b2e6934a9-operator-scripts\") pod \"nova-cell1-7fef-account-create-np5sq\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.447988 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01b1d826-91f3-4136-b30a-f48b2e6934a9-operator-scripts\") pod \"nova-cell1-7fef-account-create-np5sq\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.465930 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tmxf\" (UniqueName: \"kubernetes.io/projected/01b1d826-91f3-4136-b30a-f48b2e6934a9-kube-api-access-5tmxf\") pod \"nova-cell1-7fef-account-create-np5sq\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.472561 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.215:8000/healthcheck\": read tcp 10.217.0.2:36400->10.217.0.215:8000: read: connection reset by peer" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.479495 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.488644 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.495957 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.511592 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:50 crc kubenswrapper[4854]: E1125 09:59:50.516991 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="extract-utilities" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.517024 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="extract-utilities" Nov 25 09:59:50 crc kubenswrapper[4854]: E1125 09:59:50.517044 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="registry-server" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.517054 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="registry-server" Nov 25 09:59:50 crc kubenswrapper[4854]: E1125 09:59:50.517085 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="extract-content" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.517094 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="extract-content" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.517396 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f323116e-620c-40d7-8cc7-55315cd06335" containerName="registry-server" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.547070 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9dts\" (UniqueName: \"kubernetes.io/projected/f323116e-620c-40d7-8cc7-55315cd06335-kube-api-access-m9dts\") pod \"f323116e-620c-40d7-8cc7-55315cd06335\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.547196 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-catalog-content\") pod \"f323116e-620c-40d7-8cc7-55315cd06335\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.547389 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-utilities\") pod \"f323116e-620c-40d7-8cc7-55315cd06335\" (UID: \"f323116e-620c-40d7-8cc7-55315cd06335\") " Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.552599 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.552745 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.561338 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.563306 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-utilities" (OuterVolumeSpecName: "utilities") pod "f323116e-620c-40d7-8cc7-55315cd06335" (UID: "f323116e-620c-40d7-8cc7-55315cd06335"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.566786 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f323116e-620c-40d7-8cc7-55315cd06335-kube-api-access-m9dts" (OuterVolumeSpecName: "kube-api-access-m9dts") pod "f323116e-620c-40d7-8cc7-55315cd06335" (UID: "f323116e-620c-40d7-8cc7-55315cd06335"). InnerVolumeSpecName "kube-api-access-m9dts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.572324 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.597128 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f323116e-620c-40d7-8cc7-55315cd06335" (UID: "f323116e-620c-40d7-8cc7-55315cd06335"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651155 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651252 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-run-httpd\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651323 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651427 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-config-data\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651486 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-scripts\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651524 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-log-httpd\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651623 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kklww\" (UniqueName: 
\"kubernetes.io/projected/ffe5bf34-995f-4ee5-a067-cea929353182-kube-api-access-kklww\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651782 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9dts\" (UniqueName: \"kubernetes.io/projected/f323116e-620c-40d7-8cc7-55315cd06335-kube-api-access-m9dts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651799 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.651811 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f323116e-620c-40d7-8cc7-55315cd06335-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.682186 4854 generic.go:334] "Generic (PLEG): container finished" podID="f323116e-620c-40d7-8cc7-55315cd06335" containerID="fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073" exitCode=0 Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.682317 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2hhj" event={"ID":"f323116e-620c-40d7-8cc7-55315cd06335","Type":"ContainerDied","Data":"fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073"} Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.682354 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p2hhj" event={"ID":"f323116e-620c-40d7-8cc7-55315cd06335","Type":"ContainerDied","Data":"666606f3a882f4079221ab2d657c2fc11f7dd84b4946ab1f1055b353575527ea"} Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.682384 4854 scope.go:117] "RemoveContainer" containerID="fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.682547 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p2hhj" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.695147 4854 generic.go:334] "Generic (PLEG): container finished" podID="b8f14245-4267-4921-996d-6d192b4c9953" containerID="f72aa862edcc58565bf471a3e9b31f6609f9c1d12ab0f35ca3b943e40dcd4205" exitCode=0 Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.695255 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55dc74c94f-t88f4" event={"ID":"b8f14245-4267-4921-996d-6d192b4c9953","Type":"ContainerDied","Data":"f72aa862edcc58565bf471a3e9b31f6609f9c1d12ab0f35ca3b943e40dcd4205"} Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.705518 4854 generic.go:334] "Generic (PLEG): container finished" podID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" containerID="a4446b3026f09ed88d298a11c202587670e200fe68483c61cdeda6a4c22d923e" exitCode=0 Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.705587 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" event={"ID":"964f5abd-0c6c-47cf-82ca-ea31aaf2b522","Type":"ContainerDied","Data":"a4446b3026f09ed88d298a11c202587670e200fe68483c61cdeda6a4c22d923e"} Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.739253 4854 generic.go:334] "Generic (PLEG): container finished" podID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerID="ce845be113d7c4914879cee65d8279f1fe6e5641babefbbdaa7470c88928078b" exitCode=0 Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.739316 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c67dbcea-a3b9-46ac-833c-97595c61756e","Type":"ContainerDied","Data":"ce845be113d7c4914879cee65d8279f1fe6e5641babefbbdaa7470c88928078b"} Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.766860 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kklww\" (UniqueName: \"kubernetes.io/projected/ffe5bf34-995f-4ee5-a067-cea929353182-kube-api-access-kklww\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.767052 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.767151 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-run-httpd\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.767246 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.767418 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-config-data\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " 
pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.767515 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-scripts\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.767579 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-log-httpd\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.769072 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-run-httpd\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.781430 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.781793 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-log-httpd\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.791130 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.795551 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-scripts\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.796577 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-config-data\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.842530 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kklww\" (UniqueName: \"kubernetes.io/projected/ffe5bf34-995f-4ee5-a067-cea929353182-kube-api-access-kklww\") pod \"ceilometer-0\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.866892 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2hhj"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.942794 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p2hhj"] Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.949735 4854 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:59:50 crc kubenswrapper[4854]: I1125 09:59:50.950611 4854 scope.go:117] "RemoveContainer" containerID="a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.150262 4854 scope.go:117] "RemoveContainer" containerID="93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.330617 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0f90516-2fdf-4c1a-86e2-ea4626d8329f" path="/var/lib/kubelet/pods/b0f90516-2fdf-4c1a-86e2-ea4626d8329f/volumes" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.331927 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f323116e-620c-40d7-8cc7-55315cd06335" path="/var/lib/kubelet/pods/f323116e-620c-40d7-8cc7-55315cd06335/volumes" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.332719 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-6hbdw"] Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.438584 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-795866c477-qft6s" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.497002 4854 scope.go:117] "RemoveContainer" containerID="fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073" Nov 25 09:59:51 crc kubenswrapper[4854]: E1125 09:59:51.498548 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073\": container with ID starting with fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073 not found: ID does not exist" containerID="fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.498603 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073"} err="failed to get container status \"fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073\": rpc error: code = NotFound desc = could not find container \"fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073\": container with ID starting with fdf0b5385ac1cce1db415dac0006e46fdbc371f69ad72562da1ebe61359fe073 not found: ID does not exist" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.498637 4854 scope.go:117] "RemoveContainer" containerID="a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2" Nov 25 09:59:51 crc kubenswrapper[4854]: E1125 09:59:51.508345 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2\": container with ID starting with a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2 not found: ID does not exist" containerID="a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.508389 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2"} err="failed to get container status \"a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2\": rpc error: code = NotFound desc = 
could not find container \"a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2\": container with ID starting with a1a4ea3da684f5af4c1f39b6d1f70b08bba1bded012615b2e6ac6804139f92e2 not found: ID does not exist" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.508422 4854 scope.go:117] "RemoveContainer" containerID="93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224" Nov 25 09:59:51 crc kubenswrapper[4854]: E1125 09:59:51.513584 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224\": container with ID starting with 93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224 not found: ID does not exist" containerID="93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.513627 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224"} err="failed to get container status \"93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224\": rpc error: code = NotFound desc = could not find container \"93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224\": container with ID starting with 93f0b094ffda1d0c6d04df53d6f472d5a34b8201cd681c700b6cedc6eb458224 not found: ID does not exist" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.567311 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.608059 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-swlpj"] Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.631868 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-dbac-account-create-kk5zc"] Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.643586 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data-custom\") pod \"b8f14245-4267-4921-996d-6d192b4c9953\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.643804 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-combined-ca-bundle\") pod \"b8f14245-4267-4921-996d-6d192b4c9953\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.643905 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4675\" (UniqueName: \"kubernetes.io/projected/b8f14245-4267-4921-996d-6d192b4c9953-kube-api-access-l4675\") pod \"b8f14245-4267-4921-996d-6d192b4c9953\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.644345 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data\") pod \"b8f14245-4267-4921-996d-6d192b4c9953\" (UID: \"b8f14245-4267-4921-996d-6d192b4c9953\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.654470 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b8f14245-4267-4921-996d-6d192b4c9953" (UID: "b8f14245-4267-4921-996d-6d192b4c9953"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.667546 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f14245-4267-4921-996d-6d192b4c9953-kube-api-access-l4675" (OuterVolumeSpecName: "kube-api-access-l4675") pod "b8f14245-4267-4921-996d-6d192b4c9953" (UID: "b8f14245-4267-4921-996d-6d192b4c9953"). InnerVolumeSpecName "kube-api-access-l4675". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.723180 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.739399 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8f14245-4267-4921-996d-6d192b4c9953" (UID: "b8f14245-4267-4921-996d-6d192b4c9953"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.749157 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.759640 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data-custom\") pod \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.759763 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-combined-ca-bundle\") pod \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.765887 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data\") pod \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.766178 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwkll\" (UniqueName: \"kubernetes.io/projected/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-kube-api-access-mwkll\") pod \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\" (UID: \"964f5abd-0c6c-47cf-82ca-ea31aaf2b522\") " Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.767228 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.767253 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-combined-ca-bundle\") on node 
\"crc\" DevicePath \"\"" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.767266 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4675\" (UniqueName: \"kubernetes.io/projected/b8f14245-4267-4921-996d-6d192b4c9953-kube-api-access-l4675\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.832507 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-kube-api-access-mwkll" (OuterVolumeSpecName: "kube-api-access-mwkll") pod "964f5abd-0c6c-47cf-82ca-ea31aaf2b522" (UID: "964f5abd-0c6c-47cf-82ca-ea31aaf2b522"). InnerVolumeSpecName "kube-api-access-mwkll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.839046 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "964f5abd-0c6c-47cf-82ca-ea31aaf2b522" (UID: "964f5abd-0c6c-47cf-82ca-ea31aaf2b522"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.843456 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6hbdw" event={"ID":"37479b5f-7d9e-4202-8807-6442cf079a33","Type":"ContainerStarted","Data":"f5c47cb81bfbf5630be07e57e0b2d0ef8a3b346a79db8bd438b45183acbe3bde"} Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.843512 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6hbdw" event={"ID":"37479b5f-7d9e-4202-8807-6442cf079a33","Type":"ContainerStarted","Data":"8ed5de93e97c8810189023b750a664bb5d19d9dae88aa396ac92a73ba7606418"} Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.855748 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-dbac-account-create-kk5zc" event={"ID":"c6d04f91-08d5-484d-98a1-a1b1fc315df4","Type":"ContainerStarted","Data":"3bab87b27b5f8665f1981af3e1a91f50f4083ecc0009225be592afd605b94836"} Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.867078 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.867749 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55dc74c94f-t88f4" event={"ID":"b8f14245-4267-4921-996d-6d192b4c9953","Type":"ContainerDied","Data":"2811c941f75005cc0849e6b37c2343bb829765aa3d584f45a3eafb60b59e2c8d"} Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.867794 4854 scope.go:117] "RemoveContainer" containerID="f72aa862edcc58565bf471a3e9b31f6609f9c1d12ab0f35ca3b943e40dcd4205" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.867912 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-6hbdw" podStartSLOduration=2.8678973020000003 podStartE2EDuration="2.867897302s" podCreationTimestamp="2025-11-25 09:59:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:51.865090124 +0000 UTC m=+1397.718083500" watchObservedRunningTime="2025-11-25 09:59:51.867897302 +0000 UTC m=+1397.720890678" Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.867954 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-55dc74c94f-t88f4"
Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.871529 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.871713 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwkll\" (UniqueName: \"kubernetes.io/projected/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-kube-api-access-mwkll\") on node \"crc\" DevicePath \"\""
Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.892505 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6d5696cb69-gnxts"
Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.892549 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" event={"ID":"964f5abd-0c6c-47cf-82ca-ea31aaf2b522","Type":"ContainerDied","Data":"0bbb3429731b7139ebed87acfe9153d296c325f5a9354f29705dccdefd7472f6"}
Nov 25 09:59:51 crc kubenswrapper[4854]: I1125 09:59:51.894134 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-swlpj" event={"ID":"affbd45c-173c-492f-b047-3a5db0988607","Type":"ContainerStarted","Data":"77c3689882f76112a6960874d53e043ec15b7553d9a0f6ae5058be1112b116e8"}
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.035979 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.188:9292/healthcheck\": read tcp 10.217.0.2:49128->10.217.0.188:9292: read: connection reset by peer"
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.036283 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.188:9292/healthcheck\": read tcp 10.217.0.2:49138->10.217.0.188:9292: read: connection reset by peer"
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.120903 4854 scope.go:117] "RemoveContainer" containerID="a4446b3026f09ed88d298a11c202587670e200fe68483c61cdeda6a4c22d923e"
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.166634 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c7l4t"
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.255792 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.269661 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7fef-account-create-np5sq"]
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.282087 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8rxxf"]
Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.298735 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-daab-account-create-5pd4d"]
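The two "Probe failed" records above come from kubelet HTTPS readiness probes against glance's /healthcheck endpoint; the TCP resets are expected while the glance containers are shutting down. Roughly equivalent probe logic, as a sketch (the endpoint URL is taken from the log; skipping certificate verification mirrors kubelet HTTPS probe behaviour, and the 200-399 success window matches the documented HTTP probe contract):

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"time"
    )

    // probe performs one readiness-style HTTP GET. Any transport error
    // (such as the "connection reset by peer" above) or a status outside
    // 200-399 counts as a failed probe.
    func probe(url string, timeout time.Duration) error {
    	client := &http.Client{
    		Timeout: timeout,
    		Transport: &http.Transport{
    			// Kubelet HTTPS probes do not verify the serving certificate.
    			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    		},
    	}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    		return fmt.Errorf("unexpected status %d", resp.StatusCode)
    	}
    	return nil
    }

    func main() {
    	// Endpoint taken from the log records above.
    	if err := probe("https://10.217.0.188:9292/healthcheck", time.Second); err != nil {
    		fmt.Println("Probe failed:", err)
    	}
    }

A readiness failure only removes the pod's endpoints from service load-balancing; unlike the liveness failure later in this log, it does not restart the container.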
"964f5abd-0c6c-47cf-82ca-ea31aaf2b522"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.385495 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data" (OuterVolumeSpecName: "config-data") pod "b8f14245-4267-4921-996d-6d192b4c9953" (UID: "b8f14245-4267-4921-996d-6d192b4c9953"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.397394 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f14245-4267-4921-996d-6d192b4c9953-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.397646 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.403944 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "964f5abd-0c6c-47cf-82ca-ea31aaf2b522" (UID: "964f5abd-0c6c-47cf-82ca-ea31aaf2b522"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.502150 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964f5abd-0c6c-47cf-82ca-ea31aaf2b522-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.924793 4854 generic.go:334] "Generic (PLEG): container finished" podID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerID="3925d0ecbcf492ecf430f31674a2f8cf6d1429050e7fd4aff7d6dae030077326" exitCode=0 Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.925145 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9291c61a-5095-4ccb-a6a0-e1e618bfb501","Type":"ContainerDied","Data":"3925d0ecbcf492ecf430f31674a2f8cf6d1429050e7fd4aff7d6dae030077326"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.925183 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9291c61a-5095-4ccb-a6a0-e1e618bfb501","Type":"ContainerDied","Data":"8fb3cb04a044cd35c6dfcba269299d9be3fff9c987b28796f74432362b33851a"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.925199 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8fb3cb04a044cd35c6dfcba269299d9be3fff9c987b28796f74432362b33851a" Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.930796 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7fef-account-create-np5sq" event={"ID":"01b1d826-91f3-4136-b30a-f48b2e6934a9","Type":"ContainerStarted","Data":"d0baf3043835def307abb3671678c39af3184e99d11a0794aa9204497fdfaa59"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.945258 4854 generic.go:334] "Generic (PLEG): container finished" podID="37479b5f-7d9e-4202-8807-6442cf079a33" containerID="f5c47cb81bfbf5630be07e57e0b2d0ef8a3b346a79db8bd438b45183acbe3bde" exitCode=0 Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.945365 4854 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6hbdw" event={"ID":"37479b5f-7d9e-4202-8807-6442cf079a33","Type":"ContainerDied","Data":"f5c47cb81bfbf5630be07e57e0b2d0ef8a3b346a79db8bd438b45183acbe3bde"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.958963 4854 generic.go:334] "Generic (PLEG): container finished" podID="affbd45c-173c-492f-b047-3a5db0988607" containerID="1399f465cada421ee03e69385fe6686d9b35d4d927864d5a9f945b6b3e1d2a93" exitCode=0 Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.959053 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-swlpj" event={"ID":"affbd45c-173c-492f-b047-3a5db0988607","Type":"ContainerDied","Data":"1399f465cada421ee03e69385fe6686d9b35d4d927864d5a9f945b6b3e1d2a93"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.966402 4854 generic.go:334] "Generic (PLEG): container finished" podID="c6d04f91-08d5-484d-98a1-a1b1fc315df4" containerID="4d00bcdabf49f587df89080c98c8ffeab9b987293a55257c3ad6d8df072b3768" exitCode=0 Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.966498 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-dbac-account-create-kk5zc" event={"ID":"c6d04f91-08d5-484d-98a1-a1b1fc315df4","Type":"ContainerDied","Data":"4d00bcdabf49f587df89080c98c8ffeab9b987293a55257c3ad6d8df072b3768"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.969362 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerStarted","Data":"c92da635116b83da9d8ef2b84770ffb90bb13f5e8986abf04abe965dc1b89f6c"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.971688 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-daab-account-create-5pd4d" event={"ID":"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e","Type":"ContainerStarted","Data":"abb16d60be887b9b91315e1c43a1bff4945809a2d8071c6ea139fc38ddc790f7"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.973221 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8rxxf" event={"ID":"2822fcff-eadb-4d68-9297-2940d4573bc7","Type":"ContainerStarted","Data":"b8b181385544c4019357daf8b365082fa6d806f9b5171b9d5c0adca6877f7e7d"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.992062 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c67dbcea-a3b9-46ac-833c-97595c61756e","Type":"ContainerDied","Data":"268ac2fd85d2bf853f42fb82f17f834cb4c6cfd52bc5afb8e54cc00830e62329"} Nov 25 09:59:52 crc kubenswrapper[4854]: I1125 09:59:52.992121 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="268ac2fd85d2bf853f42fb82f17f834cb4c6cfd52bc5afb8e54cc00830e62329" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.089086 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.122875 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-logs\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.122967 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6r6j2\" (UniqueName: \"kubernetes.io/projected/c67dbcea-a3b9-46ac-833c-97595c61756e-kube-api-access-6r6j2\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.123001 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-httpd-run\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.123132 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-public-tls-certs\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.123192 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.123282 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-combined-ca-bundle\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.123365 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-config-data\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.123395 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-scripts\") pod \"c67dbcea-a3b9-46ac-833c-97595c61756e\" (UID: \"c67dbcea-a3b9-46ac-833c-97595c61756e\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.132996 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-scripts" (OuterVolumeSpecName: "scripts") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.139899 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.140261 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c67dbcea-a3b9-46ac-833c-97595c61756e-kube-api-access-6r6j2" (OuterVolumeSpecName: "kube-api-access-6r6j2") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "kube-api-access-6r6j2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.137923 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-logs" (OuterVolumeSpecName: "logs") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.182846 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.230478 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.230617 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.230646 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.230690 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6r6j2\" (UniqueName: \"kubernetes.io/projected/c67dbcea-a3b9-46ac-833c-97595c61756e-kube-api-access-6r6j2\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.230703 4854 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c67dbcea-a3b9-46ac-833c-97595c61756e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.308180 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k7pf8"] Nov 25 09:59:53 crc kubenswrapper[4854]: E1125 09:59:53.308804 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f14245-4267-4921-996d-6d192b4c9953" containerName="heat-api" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.308825 4854 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b8f14245-4267-4921-996d-6d192b4c9953" containerName="heat-api" Nov 25 09:59:53 crc kubenswrapper[4854]: E1125 09:59:53.308849 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-log" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.308856 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-log" Nov 25 09:59:53 crc kubenswrapper[4854]: E1125 09:59:53.308871 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-httpd" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.308877 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-httpd" Nov 25 09:59:53 crc kubenswrapper[4854]: E1125 09:59:53.308908 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" containerName="heat-cfnapi" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.308913 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" containerName="heat-cfnapi" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.309120 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-log" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.309147 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" containerName="heat-cfnapi" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.309178 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-httpd" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.309196 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f14245-4267-4921-996d-6d192b4c9953" containerName="heat-api" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.311474 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.321755 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k7pf8"] Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.358603 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.389363 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.421192 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.428595 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-config-data" (OuterVolumeSpecName: "config-data") pod "c67dbcea-a3b9-46ac-833c-97595c61756e" (UID: "c67dbcea-a3b9-46ac-833c-97595c61756e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.435357 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-utilities\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.435450 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k2k8\" (UniqueName: \"kubernetes.io/projected/180d6ba0-1592-4d2a-be8f-c121b6bf618c-kube-api-access-2k2k8\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.435779 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-catalog-content\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.435875 4854 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.435892 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.435904 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.435915 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67dbcea-a3b9-46ac-833c-97595c61756e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.510048 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.537909 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-catalog-content\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.537967 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-utilities\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.538017 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k2k8\" (UniqueName: \"kubernetes.io/projected/180d6ba0-1592-4d2a-be8f-c121b6bf618c-kube-api-access-2k2k8\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.538653 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-utilities\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.538818 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-catalog-content\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.605208 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k2k8\" (UniqueName: \"kubernetes.io/projected/180d6ba0-1592-4d2a-be8f-c121b6bf618c-kube-api-access-2k2k8\") pod \"certified-operators-k7pf8\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.611025 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.642457 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-config-data\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.642582 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-httpd-run\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.642641 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdv7s\" (UniqueName: \"kubernetes.io/projected/9291c61a-5095-4ccb-a6a0-e1e618bfb501-kube-api-access-qdv7s\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.642695 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-scripts\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.645883 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.645998 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-internal-tls-certs\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.646110 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-combined-ca-bundle\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.646169 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-logs\") pod \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\" (UID: \"9291c61a-5095-4ccb-a6a0-e1e618bfb501\") " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.647622 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.648628 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-logs" (OuterVolumeSpecName: "logs") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.654538 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-scripts" (OuterVolumeSpecName: "scripts") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.655068 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.657877 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-xgtsh" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="registry-server" probeResult="failure" output=< Nov 25 09:59:53 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 09:59:53 crc kubenswrapper[4854]: > Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.658407 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9291c61a-5095-4ccb-a6a0-e1e618bfb501-kube-api-access-qdv7s" (OuterVolumeSpecName: "kube-api-access-qdv7s") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "kube-api-access-qdv7s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.751306 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.751345 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.751353 4854 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9291c61a-5095-4ccb-a6a0-e1e618bfb501-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.751368 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdv7s\" (UniqueName: \"kubernetes.io/projected/9291c61a-5095-4ccb-a6a0-e1e618bfb501-kube-api-access-qdv7s\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.751378 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.935786 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 25 09:59:53 crc kubenswrapper[4854]: I1125 09:59:53.957764 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.049137 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7fef-account-create-np5sq" event={"ID":"01b1d826-91f3-4136-b30a-f48b2e6934a9","Type":"ContainerStarted","Data":"dfea7521c7f1ee5364874d541960caedb2f5b5922d180a9f6a4d4a060628aa2e"} Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.049587 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.049759 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.077926 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.094858 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-7fef-account-create-np5sq" podStartSLOduration=4.094837166 podStartE2EDuration="4.094837166s" podCreationTimestamp="2025-11-25 09:59:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:54.071419268 +0000 UTC m=+1399.924412644" watchObservedRunningTime="2025-11-25 09:59:54.094837166 +0000 UTC m=+1399.947830542" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.164195 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.187938 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.227455 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-config-data" (OuterVolumeSpecName: "config-data") pod "9291c61a-5095-4ccb-a6a0-e1e618bfb501" (UID: "9291c61a-5095-4ccb-a6a0-e1e618bfb501"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.273477 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.273507 4854 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9291c61a-5095-4ccb-a6a0-e1e618bfb501-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.388840 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k7pf8"] Nov 25 09:59:54 crc kubenswrapper[4854]: E1125 09:59:54.474459 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc67dbcea_a3b9_46ac_833c_97595c61756e.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.526381 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.552015 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.612137 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.666754 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: E1125 09:59:54.668184 4854 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-log" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.668203 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-log" Nov 25 09:59:54 crc kubenswrapper[4854]: E1125 09:59:54.668244 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-httpd" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.668252 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-httpd" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.668875 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-httpd" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.668900 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" containerName="glance-log" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.699127 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.704454 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.707451 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6k6vk" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.707891 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.708801 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.819968 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdsvm\" (UniqueName: \"kubernetes.io/projected/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-kube-api-access-bdsvm\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820021 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820308 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820361 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-logs\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820472 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-scripts\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820507 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820579 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-config-data\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820610 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.820911 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.852809 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.860535 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.897773 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: E1125 09:59:54.898378 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6d04f91-08d5-484d-98a1-a1b1fc315df4" containerName="mariadb-account-create" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.898398 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6d04f91-08d5-484d-98a1-a1b1fc315df4" containerName="mariadb-account-create" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.898732 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6d04f91-08d5-484d-98a1-a1b1fc315df4" containerName="mariadb-account-create" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.926574 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl95s\" (UniqueName: \"kubernetes.io/projected/c6d04f91-08d5-484d-98a1-a1b1fc315df4-kube-api-access-pl95s\") pod \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.926860 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6d04f91-08d5-484d-98a1-a1b1fc315df4-operator-scripts\") pod \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\" (UID: \"c6d04f91-08d5-484d-98a1-a1b1fc315df4\") " Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927410 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdsvm\" (UniqueName: \"kubernetes.io/projected/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-kube-api-access-bdsvm\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927585 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927623 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-logs\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927649 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-scripts\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927692 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927738 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-config-data\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927763 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.927852 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.928928 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-logs\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.930235 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.930353 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.936909 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6d04f91-08d5-484d-98a1-a1b1fc315df4-kube-api-access-pl95s" (OuterVolumeSpecName: "kube-api-access-pl95s") pod "c6d04f91-08d5-484d-98a1-a1b1fc315df4" (UID: "c6d04f91-08d5-484d-98a1-a1b1fc315df4"). InnerVolumeSpecName "kube-api-access-pl95s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.937409 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6d04f91-08d5-484d-98a1-a1b1fc315df4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c6d04f91-08d5-484d-98a1-a1b1fc315df4" (UID: "c6d04f91-08d5-484d-98a1-a1b1fc315df4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.938282 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.939063 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.952514 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.978535 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.978925 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.983441 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdsvm\" (UniqueName: \"kubernetes.io/projected/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-kube-api-access-bdsvm\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.990422 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-scripts\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:54 crc kubenswrapper[4854]: I1125 09:59:54.991007 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.004646 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/953a22a5-3c0c-402b-a4e1-35dfcea8f92f-config-data\") pod \"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.029308 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.029638 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.029686 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.029729 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.030014 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.030037 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-logs\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.030133 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.030193 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjfrd\" (UniqueName: \"kubernetes.io/projected/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-kube-api-access-hjfrd\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.030289 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6d04f91-08d5-484d-98a1-a1b1fc315df4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.030304 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl95s\" (UniqueName: \"kubernetes.io/projected/c6d04f91-08d5-484d-98a1-a1b1fc315df4-kube-api-access-pl95s\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.037507 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod 
\"glance-default-external-api-0\" (UID: \"953a22a5-3c0c-402b-a4e1-35dfcea8f92f\") " pod="openstack/glance-default-external-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.076555 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.094601 4854 generic.go:334] "Generic (PLEG): container finished" podID="2822fcff-eadb-4d68-9297-2940d4573bc7" containerID="9b30c9076b3fa761b2b41c56f8333f28124f0b8c191b8351cce45674e79ca7e5" exitCode=0 Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.101611 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-dbac-account-create-kk5zc" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.104052 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9291c61a-5095-4ccb-a6a0-e1e618bfb501" path="/var/lib/kubelet/pods/9291c61a-5095-4ccb-a6a0-e1e618bfb501/volumes" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.108785 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" path="/var/lib/kubelet/pods/c67dbcea-a3b9-46ac-833c-97595c61756e/volumes" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.115113 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8rxxf" event={"ID":"2822fcff-eadb-4d68-9297-2940d4573bc7","Type":"ContainerDied","Data":"9b30c9076b3fa761b2b41c56f8333f28124f0b8c191b8351cce45674e79ca7e5"} Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.115652 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-dbac-account-create-kk5zc" event={"ID":"c6d04f91-08d5-484d-98a1-a1b1fc315df4","Type":"ContainerDied","Data":"3bab87b27b5f8665f1981af3e1a91f50f4083ecc0009225be592afd605b94836"} Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.115816 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bab87b27b5f8665f1981af3e1a91f50f4083ecc0009225be592afd605b94836" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.129829 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.129898 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.129956 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.130945 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"65f6bcfa40b1e5bbb70c379bd608e17d8c0ff4d22430507df2078040825b6744"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 
09:59:55.131009 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://65f6bcfa40b1e5bbb70c379bd608e17d8c0ff4d22430507df2078040825b6744" gracePeriod=600 Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.177023 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-logs\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.177066 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.177300 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.178625 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-logs\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.180462 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.191449 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.202980 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjfrd\" (UniqueName: \"kubernetes.io/projected/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-kube-api-access-hjfrd\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.203183 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.203237 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.203262 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.203317 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.203958 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.217534 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.239231 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerStarted","Data":"54528e11ec186c07e1c21454810fdc80e8d8671e4a47f5d5fbe7695989b5cdee"} Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.248395 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.261694 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjfrd\" (UniqueName: \"kubernetes.io/projected/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-kube-api-access-hjfrd\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.269816 4854 generic.go:334] "Generic (PLEG): container finished" podID="d9164e45-ade8-4f78-b89e-e6e3b61e1a4e" containerID="3db0c6c8660a37316f2bae6598892fc702d2d88bfb66027906dc88fd44506139" exitCode=0 Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.270117 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-daab-account-create-5pd4d" event={"ID":"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e","Type":"ContainerDied","Data":"3db0c6c8660a37316f2bae6598892fc702d2d88bfb66027906dc88fd44506139"} Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.285597 4854 generic.go:334] "Generic (PLEG): container finished" 
podID="01b1d826-91f3-4136-b30a-f48b2e6934a9" containerID="dfea7521c7f1ee5364874d541960caedb2f5b5922d180a9f6a4d4a060628aa2e" exitCode=0 Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.285738 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7fef-account-create-np5sq" event={"ID":"01b1d826-91f3-4136-b30a-f48b2e6934a9","Type":"ContainerDied","Data":"dfea7521c7f1ee5364874d541960caedb2f5b5922d180a9f6a4d4a060628aa2e"} Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.310450 4854 generic.go:334] "Generic (PLEG): container finished" podID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerID="cb307152b9abbf4f85ace9d8fe1685bbccc335f8630367a22f3cf2b126796348" exitCode=0 Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.310499 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7pf8" event={"ID":"180d6ba0-1592-4d2a-be8f-c121b6bf618c","Type":"ContainerDied","Data":"cb307152b9abbf4f85ace9d8fe1685bbccc335f8630367a22f3cf2b126796348"} Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.310556 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7pf8" event={"ID":"180d6ba0-1592-4d2a-be8f-c121b6bf618c","Type":"ContainerStarted","Data":"620e7ec9905747b78dd214fa569060c718a753467af80ed442450ad884c82164"} Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.325402 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02ae5459-cfb7-4cce-a81b-7c0f28eca1aa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.386250 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.563296 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.572025 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.601720 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c7l4t"] Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.605705 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c7l4t" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" containerID="cri-o://1bc02835fc8d80cd84ee5a8928bc6761510a9f7f44c3ab5538d8a17a5662378b" gracePeriod=2 Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.616314 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.717268 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/affbd45c-173c-492f-b047-3a5db0988607-operator-scripts\") pod \"affbd45c-173c-492f-b047-3a5db0988607\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.717613 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tkq5\" (UniqueName: \"kubernetes.io/projected/affbd45c-173c-492f-b047-3a5db0988607-kube-api-access-7tkq5\") pod \"affbd45c-173c-492f-b047-3a5db0988607\" (UID: \"affbd45c-173c-492f-b047-3a5db0988607\") " Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.717809 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tsdl\" (UniqueName: \"kubernetes.io/projected/37479b5f-7d9e-4202-8807-6442cf079a33-kube-api-access-2tsdl\") pod \"37479b5f-7d9e-4202-8807-6442cf079a33\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.717949 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37479b5f-7d9e-4202-8807-6442cf079a33-operator-scripts\") pod \"37479b5f-7d9e-4202-8807-6442cf079a33\" (UID: \"37479b5f-7d9e-4202-8807-6442cf079a33\") " Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.718400 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affbd45c-173c-492f-b047-3a5db0988607-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "affbd45c-173c-492f-b047-3a5db0988607" (UID: "affbd45c-173c-492f-b047-3a5db0988607"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.718786 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37479b5f-7d9e-4202-8807-6442cf079a33-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37479b5f-7d9e-4202-8807-6442cf079a33" (UID: "37479b5f-7d9e-4202-8807-6442cf079a33"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.718928 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37479b5f-7d9e-4202-8807-6442cf079a33-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.718970 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/affbd45c-173c-492f-b047-3a5db0988607-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.741097 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37479b5f-7d9e-4202-8807-6442cf079a33-kube-api-access-2tsdl" (OuterVolumeSpecName: "kube-api-access-2tsdl") pod "37479b5f-7d9e-4202-8807-6442cf079a33" (UID: "37479b5f-7d9e-4202-8807-6442cf079a33"). InnerVolumeSpecName "kube-api-access-2tsdl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.743930 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/affbd45c-173c-492f-b047-3a5db0988607-kube-api-access-7tkq5" (OuterVolumeSpecName: "kube-api-access-7tkq5") pod "affbd45c-173c-492f-b047-3a5db0988607" (UID: "affbd45c-173c-492f-b047-3a5db0988607"). InnerVolumeSpecName "kube-api-access-7tkq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.843221 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tkq5\" (UniqueName: \"kubernetes.io/projected/affbd45c-173c-492f-b047-3a5db0988607-kube-api-access-7tkq5\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:55 crc kubenswrapper[4854]: I1125 09:59:55.843682 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tsdl\" (UniqueName: \"kubernetes.io/projected/37479b5f-7d9e-4202-8807-6442cf079a33-kube-api-access-2tsdl\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.020201 4854 scope.go:117] "RemoveContainer" containerID="b71373c5ed36e6f9cd07aefe202391580817607457b50933b56db407c7162435" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.106395 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.373979 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"953a22a5-3c0c-402b-a4e1-35dfcea8f92f","Type":"ContainerStarted","Data":"8f66475e04c1773ae5d82cbd77613a92e1d1028f9573d8b852aa0c8243b3b440"} Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.392439 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerStarted","Data":"47f91c70fed023e2e3bd5bae11818129ba2c9915e4a2b5ab94fd18c983bfab81"} Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.411370 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-swlpj" event={"ID":"affbd45c-173c-492f-b047-3a5db0988607","Type":"ContainerDied","Data":"77c3689882f76112a6960874d53e043ec15b7553d9a0f6ae5058be1112b116e8"} Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.411410 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77c3689882f76112a6960874d53e043ec15b7553d9a0f6ae5058be1112b116e8" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.411479 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-swlpj" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.496935 4854 generic.go:334] "Generic (PLEG): container finished" podID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerID="1bc02835fc8d80cd84ee5a8928bc6761510a9f7f44c3ab5538d8a17a5662378b" exitCode=0 Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.497060 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7l4t" event={"ID":"bb7eee5b-75e6-483b-a68e-1d6e39814690","Type":"ContainerDied","Data":"1bc02835fc8d80cd84ee5a8928bc6761510a9f7f44c3ab5538d8a17a5662378b"} Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.522286 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6hbdw" event={"ID":"37479b5f-7d9e-4202-8807-6442cf079a33","Type":"ContainerDied","Data":"8ed5de93e97c8810189023b750a664bb5d19d9dae88aa396ac92a73ba7606418"} Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.522339 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ed5de93e97c8810189023b750a664bb5d19d9dae88aa396ac92a73ba7606418" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.522552 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6hbdw" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.544714 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="65f6bcfa40b1e5bbb70c379bd608e17d8c0ff4d22430507df2078040825b6744" exitCode=0 Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.544866 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"65f6bcfa40b1e5bbb70c379bd608e17d8c0ff4d22430507df2078040825b6744"} Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.544897 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef"} Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.544913 4854 scope.go:117] "RemoveContainer" containerID="8c88dce0ea083d0b4318356bc4c4cafd9ff804af077bca2201c157b710b82d4d" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.556434 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.559745 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.706018 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-catalog-content\") pod \"bb7eee5b-75e6-483b-a68e-1d6e39814690\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.706156 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdrw7\" (UniqueName: \"kubernetes.io/projected/bb7eee5b-75e6-483b-a68e-1d6e39814690-kube-api-access-qdrw7\") pod \"bb7eee5b-75e6-483b-a68e-1d6e39814690\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.706275 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-utilities\") pod \"bb7eee5b-75e6-483b-a68e-1d6e39814690\" (UID: \"bb7eee5b-75e6-483b-a68e-1d6e39814690\") " Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.716511 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-utilities" (OuterVolumeSpecName: "utilities") pod "bb7eee5b-75e6-483b-a68e-1d6e39814690" (UID: "bb7eee5b-75e6-483b-a68e-1d6e39814690"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.745137 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb7eee5b-75e6-483b-a68e-1d6e39814690-kube-api-access-qdrw7" (OuterVolumeSpecName: "kube-api-access-qdrw7") pod "bb7eee5b-75e6-483b-a68e-1d6e39814690" (UID: "bb7eee5b-75e6-483b-a68e-1d6e39814690"). InnerVolumeSpecName "kube-api-access-qdrw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.810427 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdrw7\" (UniqueName: \"kubernetes.io/projected/bb7eee5b-75e6-483b-a68e-1d6e39814690-kube-api-access-qdrw7\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:56 crc kubenswrapper[4854]: I1125 09:59:56.810455 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.035729 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb7eee5b-75e6-483b-a68e-1d6e39814690" (UID: "bb7eee5b-75e6-483b-a68e-1d6e39814690"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.124441 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7eee5b-75e6-483b-a68e-1d6e39814690-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.573865 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7l4t" event={"ID":"bb7eee5b-75e6-483b-a68e-1d6e39814690","Type":"ContainerDied","Data":"952c31334cacf038348868a85287ab05aa0456ae2e7c34822d47a2fa60ab4a03"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.573914 4854 scope.go:117] "RemoveContainer" containerID="1bc02835fc8d80cd84ee5a8928bc6761510a9f7f44c3ab5538d8a17a5662378b" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.574022 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c7l4t" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.582529 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7fef-account-create-np5sq" event={"ID":"01b1d826-91f3-4136-b30a-f48b2e6934a9","Type":"ContainerDied","Data":"d0baf3043835def307abb3671678c39af3184e99d11a0794aa9204497fdfaa59"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.582570 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0baf3043835def307abb3671678c39af3184e99d11a0794aa9204497fdfaa59" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.585357 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa","Type":"ContainerStarted","Data":"75056bcf0608ed5d364a3eede6422a5d122657946e4774ef67a0a44010dbceb3"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.593485 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-daab-account-create-5pd4d" event={"ID":"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e","Type":"ContainerDied","Data":"abb16d60be887b9b91315e1c43a1bff4945809a2d8071c6ea139fc38ddc790f7"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.593525 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abb16d60be887b9b91315e1c43a1bff4945809a2d8071c6ea139fc38ddc790f7" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.593570 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.608437 4854 generic.go:334] "Generic (PLEG): container finished" podID="db0965e6-0374-4b14-875e-8557c346815e" containerID="d4fb3b9762a03acadacf705e7450e2146667caeebbf54e2b5b1d220aa3bb4cd9" exitCode=1 Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.608509 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" event={"ID":"db0965e6-0374-4b14-875e-8557c346815e","Type":"ContainerDied","Data":"d4fb3b9762a03acadacf705e7450e2146667caeebbf54e2b5b1d220aa3bb4cd9"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.609524 4854 scope.go:117] "RemoveContainer" containerID="d4fb3b9762a03acadacf705e7450e2146667caeebbf54e2b5b1d220aa3bb4cd9" Nov 25 09:59:57 crc kubenswrapper[4854]: E1125 09:59:57.610023 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 20s restarting failed container=heat-cfnapi pod=heat-cfnapi-5469c8bfd4-mmvtn_openstack(db0965e6-0374-4b14-875e-8557c346815e)\"" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" podUID="db0965e6-0374-4b14-875e-8557c346815e" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.613603 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8rxxf" event={"ID":"2822fcff-eadb-4d68-9297-2940d4573bc7","Type":"ContainerDied","Data":"b8b181385544c4019357daf8b365082fa6d806f9b5171b9d5c0adca6877f7e7d"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.613636 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8b181385544c4019357daf8b365082fa6d806f9b5171b9d5c0adca6877f7e7d" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.617850 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.629856 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.637873 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerStarted","Data":"9811aaa30222aa125f7b74099ab6a86f6e540741251fe8c6228266a7f55abe45"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.638422 4854 scope.go:117] "RemoveContainer" containerID="6a039f9dc44fb5dfaaaf7b1ecef5eb7f72b10bb71aad1c7da087a002ba5b9154" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.649840 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c7l4t"] Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.657325 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7pf8" event={"ID":"180d6ba0-1592-4d2a-be8f-c121b6bf618c","Type":"ContainerStarted","Data":"078ec9a90435e71ae681447221ff162b9e13be26859acb5b11f00d98694e0dde"} Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.673404 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c7l4t"] Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.734900 4854 scope.go:117] "RemoveContainer" containerID="197a7e26a217fb846260cbf5d78440d2cb98e2bd87d1290fe2588006a595a810" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.742477 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnmvv\" (UniqueName: \"kubernetes.io/projected/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-kube-api-access-gnmvv\") pod \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.742543 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01b1d826-91f3-4136-b30a-f48b2e6934a9-operator-scripts\") pod \"01b1d826-91f3-4136-b30a-f48b2e6934a9\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.742849 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-operator-scripts\") pod \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\" (UID: \"d9164e45-ade8-4f78-b89e-e6e3b61e1a4e\") " Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.742887 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tmxf\" (UniqueName: \"kubernetes.io/projected/01b1d826-91f3-4136-b30a-f48b2e6934a9-kube-api-access-5tmxf\") pod \"01b1d826-91f3-4136-b30a-f48b2e6934a9\" (UID: \"01b1d826-91f3-4136-b30a-f48b2e6934a9\") " Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.742961 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bfg6\" (UniqueName: \"kubernetes.io/projected/2822fcff-eadb-4d68-9297-2940d4573bc7-kube-api-access-9bfg6\") pod \"2822fcff-eadb-4d68-9297-2940d4573bc7\" (UID: \"2822fcff-eadb-4d68-9297-2940d4573bc7\") " Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.743031 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2822fcff-eadb-4d68-9297-2940d4573bc7-operator-scripts\") pod \"2822fcff-eadb-4d68-9297-2940d4573bc7\" (UID: 
\"2822fcff-eadb-4d68-9297-2940d4573bc7\") " Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.745213 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01b1d826-91f3-4136-b30a-f48b2e6934a9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "01b1d826-91f3-4136-b30a-f48b2e6934a9" (UID: "01b1d826-91f3-4136-b30a-f48b2e6934a9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.745268 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2822fcff-eadb-4d68-9297-2940d4573bc7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2822fcff-eadb-4d68-9297-2940d4573bc7" (UID: "2822fcff-eadb-4d68-9297-2940d4573bc7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.749078 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d9164e45-ade8-4f78-b89e-e6e3b61e1a4e" (UID: "d9164e45-ade8-4f78-b89e-e6e3b61e1a4e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.757985 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01b1d826-91f3-4136-b30a-f48b2e6934a9-kube-api-access-5tmxf" (OuterVolumeSpecName: "kube-api-access-5tmxf") pod "01b1d826-91f3-4136-b30a-f48b2e6934a9" (UID: "01b1d826-91f3-4136-b30a-f48b2e6934a9"). InnerVolumeSpecName "kube-api-access-5tmxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.759574 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2822fcff-eadb-4d68-9297-2940d4573bc7-kube-api-access-9bfg6" (OuterVolumeSpecName: "kube-api-access-9bfg6") pod "2822fcff-eadb-4d68-9297-2940d4573bc7" (UID: "2822fcff-eadb-4d68-9297-2940d4573bc7"). InnerVolumeSpecName "kube-api-access-9bfg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.759860 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-kube-api-access-gnmvv" (OuterVolumeSpecName: "kube-api-access-gnmvv") pod "d9164e45-ade8-4f78-b89e-e6e3b61e1a4e" (UID: "d9164e45-ade8-4f78-b89e-e6e3b61e1a4e"). InnerVolumeSpecName "kube-api-access-gnmvv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.845686 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bfg6\" (UniqueName: \"kubernetes.io/projected/2822fcff-eadb-4d68-9297-2940d4573bc7-kube-api-access-9bfg6\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.845722 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2822fcff-eadb-4d68-9297-2940d4573bc7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.845734 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnmvv\" (UniqueName: \"kubernetes.io/projected/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-kube-api-access-gnmvv\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.845743 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01b1d826-91f3-4136-b30a-f48b2e6934a9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.845754 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.845763 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tmxf\" (UniqueName: \"kubernetes.io/projected/01b1d826-91f3-4136-b30a-f48b2e6934a9-kube-api-access-5tmxf\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.849189 4854 scope.go:117] "RemoveContainer" containerID="b71373c5ed36e6f9cd07aefe202391580817607457b50933b56db407c7162435" Nov 25 09:59:57 crc kubenswrapper[4854]: I1125 09:59:57.971700 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.020898 4854 scope.go:117] "RemoveContainer" containerID="53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075" Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.133482 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-d874db4c8-mzdgt"] Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.253492 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.329817 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-5469c8bfd4-mmvtn"] Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.715467 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"953a22a5-3c0c-402b-a4e1-35dfcea8f92f","Type":"ContainerStarted","Data":"b97df5e70877f442cd13a6585fd0ca4711d2958623bef6ad9b33b3159e4ee6fb"} Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.718236 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-daab-account-create-5pd4d" Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.718463 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-7fef-account-create-np5sq" Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.718590 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8rxxf" Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.718926 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 09:59:58 crc kubenswrapper[4854]: I1125 09:59:58.718938 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-d874db4c8-mzdgt" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" containerID="cri-o://51322ecde32cacf588dbcaa0a975a0737f7f0345ccb707018ce0564a3db05c19" gracePeriod=60 Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.050471 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" path="/var/lib/kubelet/pods/bb7eee5b-75e6-483b-a68e-1d6e39814690/volumes" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.604995 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.729416 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data-custom\") pod \"db0965e6-0374-4b14-875e-8557c346815e\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.729534 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data\") pod \"db0965e6-0374-4b14-875e-8557c346815e\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.729695 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn8rg\" (UniqueName: \"kubernetes.io/projected/db0965e6-0374-4b14-875e-8557c346815e-kube-api-access-fn8rg\") pod \"db0965e6-0374-4b14-875e-8557c346815e\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.729869 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-combined-ca-bundle\") pod \"db0965e6-0374-4b14-875e-8557c346815e\" (UID: \"db0965e6-0374-4b14-875e-8557c346815e\") " Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.747336 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "db0965e6-0374-4b14-875e-8557c346815e" (UID: "db0965e6-0374-4b14-875e-8557c346815e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.749494 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db0965e6-0374-4b14-875e-8557c346815e-kube-api-access-fn8rg" (OuterVolumeSpecName: "kube-api-access-fn8rg") pod "db0965e6-0374-4b14-875e-8557c346815e" (UID: "db0965e6-0374-4b14-875e-8557c346815e"). InnerVolumeSpecName "kube-api-access-fn8rg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.778806 4854 generic.go:334] "Generic (PLEG): container finished" podID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerID="078ec9a90435e71ae681447221ff162b9e13be26859acb5b11f00d98694e0dde" exitCode=0 Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.779163 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7pf8" event={"ID":"180d6ba0-1592-4d2a-be8f-c121b6bf618c","Type":"ContainerDied","Data":"078ec9a90435e71ae681447221ff162b9e13be26859acb5b11f00d98694e0dde"} Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.786601 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db0965e6-0374-4b14-875e-8557c346815e" (UID: "db0965e6-0374-4b14-875e-8557c346815e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.810984 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerStarted","Data":"15ec4b0784e877ae274026289ab0d6762159e4676f2290f939a6caf0583ce7f5"} Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.811286 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-central-agent" containerID="cri-o://54528e11ec186c07e1c21454810fdc80e8d8671e4a47f5d5fbe7695989b5cdee" gracePeriod=30 Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.811585 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.812006 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="proxy-httpd" containerID="cri-o://15ec4b0784e877ae274026289ab0d6762159e4676f2290f939a6caf0583ce7f5" gracePeriod=30 Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.812055 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="sg-core" containerID="cri-o://9811aaa30222aa125f7b74099ab6a86f6e540741251fe8c6228266a7f55abe45" gracePeriod=30 Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.812110 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-notification-agent" containerID="cri-o://47f91c70fed023e2e3bd5bae11818129ba2c9915e4a2b5ab94fd18c983bfab81" gracePeriod=30 Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.821293 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa","Type":"ContainerStarted","Data":"8aa51fa91b52bc352b5f4d0abc2f9cc9d2c37ad737bb626b87d495ff46c15893"} Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.835494 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn8rg\" (UniqueName: \"kubernetes.io/projected/db0965e6-0374-4b14-875e-8557c346815e-kube-api-access-fn8rg\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:59 
crc kubenswrapper[4854]: I1125 09:59:59.835523 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.835552 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.836412 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-d874db4c8-mzdgt" event={"ID":"2fc43285-4e2b-4d05-b52b-b446b200723e","Type":"ContainerDied","Data":"51322ecde32cacf588dbcaa0a975a0737f7f0345ccb707018ce0564a3db05c19"} Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.836378 4854 generic.go:334] "Generic (PLEG): container finished" podID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerID="51322ecde32cacf588dbcaa0a975a0737f7f0345ccb707018ce0564a3db05c19" exitCode=1 Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.836496 4854 scope.go:117] "RemoveContainer" containerID="53f9f165f353b1956da53b03a71fc50c1ecfbf30fb2f590408f1a56169f39075" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.859702 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" event={"ID":"db0965e6-0374-4b14-875e-8557c346815e","Type":"ContainerDied","Data":"5d11d06b24dd4efdae5bd14ec30aa76bfb9485473af1462fd585e606e35fd6c6"} Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.859788 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5469c8bfd4-mmvtn" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.888219 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.671966275 podStartE2EDuration="9.888192346s" podCreationTimestamp="2025-11-25 09:59:50 +0000 UTC" firstStartedPulling="2025-11-25 09:59:52.357272261 +0000 UTC m=+1398.210265637" lastFinishedPulling="2025-11-25 09:59:58.573498332 +0000 UTC m=+1404.426491708" observedRunningTime="2025-11-25 09:59:59.846078711 +0000 UTC m=+1405.699072097" watchObservedRunningTime="2025-11-25 09:59:59.888192346 +0000 UTC m=+1405.741185722" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.919117 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data" (OuterVolumeSpecName: "config-data") pod "db0965e6-0374-4b14-875e-8557c346815e" (UID: "db0965e6-0374-4b14-875e-8557c346815e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.940639 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db0965e6-0374-4b14-875e-8557c346815e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:59 crc kubenswrapper[4854]: I1125 09:59:59.961733 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.003888 4854 scope.go:117] "RemoveContainer" containerID="d4fb3b9762a03acadacf705e7450e2146667caeebbf54e2b5b1d220aa3bb4cd9" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.046601 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data\") pod \"2fc43285-4e2b-4d05-b52b-b446b200723e\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.046639 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkx4k\" (UniqueName: \"kubernetes.io/projected/2fc43285-4e2b-4d05-b52b-b446b200723e-kube-api-access-qkx4k\") pod \"2fc43285-4e2b-4d05-b52b-b446b200723e\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.046728 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data-custom\") pod \"2fc43285-4e2b-4d05-b52b-b446b200723e\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.046798 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-combined-ca-bundle\") pod \"2fc43285-4e2b-4d05-b52b-b446b200723e\" (UID: \"2fc43285-4e2b-4d05-b52b-b446b200723e\") " Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.067461 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fc43285-4e2b-4d05-b52b-b446b200723e-kube-api-access-qkx4k" (OuterVolumeSpecName: "kube-api-access-qkx4k") pod "2fc43285-4e2b-4d05-b52b-b446b200723e" (UID: "2fc43285-4e2b-4d05-b52b-b446b200723e"). InnerVolumeSpecName "kube-api-access-qkx4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.082210 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2fc43285-4e2b-4d05-b52b-b446b200723e" (UID: "2fc43285-4e2b-4d05-b52b-b446b200723e"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.152332 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.152687 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkx4k\" (UniqueName: \"kubernetes.io/projected/2fc43285-4e2b-4d05-b52b-b446b200723e-kube-api-access-qkx4k\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.182304 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj"] Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191063 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191099 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191116 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191123 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191149 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affbd45c-173c-492f-b047-3a5db0988607" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191156 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="affbd45c-173c-492f-b047-3a5db0988607" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191170 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191177 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191188 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b1d826-91f3-4136-b30a-f48b2e6934a9" containerName="mariadb-account-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191193 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b1d826-91f3-4136-b30a-f48b2e6934a9" containerName="mariadb-account-create" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191209 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191217 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191223 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191229 4854 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191243 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9164e45-ade8-4f78-b89e-e6e3b61e1a4e" containerName="mariadb-account-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191249 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9164e45-ade8-4f78-b89e-e6e3b61e1a4e" containerName="mariadb-account-create" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191267 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191274 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191292 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191298 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191311 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37479b5f-7d9e-4202-8807-6442cf079a33" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191317 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="37479b5f-7d9e-4202-8807-6442cf079a33" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.191327 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2822fcff-eadb-4d68-9297-2940d4573bc7" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191333 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2822fcff-eadb-4d68-9297-2940d4573bc7" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191727 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191748 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb7eee5b-75e6-483b-a68e-1d6e39814690" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191764 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191776 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9164e45-ade8-4f78-b89e-e6e3b61e1a4e" containerName="mariadb-account-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191790 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191800 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="01b1d826-91f3-4136-b30a-f48b2e6934a9" containerName="mariadb-account-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191822 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="affbd45c-173c-492f-b047-3a5db0988607" containerName="mariadb-database-create" Nov 25 10:00:00 crc 
kubenswrapper[4854]: I1125 10:00:00.191831 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191844 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191858 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2822fcff-eadb-4d68-9297-2940d4573bc7" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.191873 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="37479b5f-7d9e-4202-8807-6442cf079a33" containerName="mariadb-database-create" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.192663 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.196178 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.196442 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.223286 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj"] Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.259408 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqzzp\" (UniqueName: \"kubernetes.io/projected/d61e667f-63aa-47e8-b22e-4a515dc5d81d-kube-api-access-qqzzp\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.259777 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d61e667f-63aa-47e8-b22e-4a515dc5d81d-secret-volume\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.259841 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d61e667f-63aa-47e8-b22e-4a515dc5d81d-config-volume\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.311457 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fc43285-4e2b-4d05-b52b-b446b200723e" (UID: "2fc43285-4e2b-4d05-b52b-b446b200723e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.313616 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-f5c9d8946-qfdms" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.367112 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqzzp\" (UniqueName: \"kubernetes.io/projected/d61e667f-63aa-47e8-b22e-4a515dc5d81d-kube-api-access-qqzzp\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.367440 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d61e667f-63aa-47e8-b22e-4a515dc5d81d-secret-volume\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.367498 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d61e667f-63aa-47e8-b22e-4a515dc5d81d-config-volume\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.367696 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.377968 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d61e667f-63aa-47e8-b22e-4a515dc5d81d-config-volume\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.446333 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d61e667f-63aa-47e8-b22e-4a515dc5d81d-secret-volume\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.457207 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqzzp\" (UniqueName: \"kubernetes.io/projected/d61e667f-63aa-47e8-b22e-4a515dc5d81d-kube-api-access-qqzzp\") pod \"collect-profiles-29401080-wnzgj\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.462332 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dr9x4"] Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.465170 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.465391 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" 
containerName="heat-api" Nov 25 10:00:00 crc kubenswrapper[4854]: E1125 10:00:00.465518 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.470173 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.470663 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="db0965e6-0374-4b14-875e-8557c346815e" containerName="heat-cfnapi" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.482687 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.493639 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.494160 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-7mwfx" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.494502 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.512456 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dr9x4"] Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.543625 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-795866c477-qft6s"] Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.543920 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-795866c477-qft6s" podUID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" containerName="heat-engine" containerID="cri-o://187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" gracePeriod=60 Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.579239 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qqdx\" (UniqueName: \"kubernetes.io/projected/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-kube-api-access-6qqdx\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.579808 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-config-data\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.579907 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.579939 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-scripts\") pod 
\"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.656804 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data" (OuterVolumeSpecName: "config-data") pod "2fc43285-4e2b-4d05-b52b-b446b200723e" (UID: "2fc43285-4e2b-4d05-b52b-b446b200723e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.681934 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-config-data\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.682089 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.682119 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-scripts\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.682152 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qqdx\" (UniqueName: \"kubernetes.io/projected/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-kube-api-access-6qqdx\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.682603 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc43285-4e2b-4d05-b52b-b446b200723e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.686463 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-scripts\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.686722 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-config-data\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.690394 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " 
pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.705254 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qqdx\" (UniqueName: \"kubernetes.io/projected/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-kube-api-access-6qqdx\") pod \"nova-cell0-conductor-db-sync-dr9x4\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") " pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.835878 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.863390 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dr9x4" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.865406 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-5469c8bfd4-mmvtn"] Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.882439 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-5469c8bfd4-mmvtn"] Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.906602 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"02ae5459-cfb7-4cce-a81b-7c0f28eca1aa","Type":"ContainerStarted","Data":"5405689c495accb714553ee20c23745295532aaaa707d9dd9836f99539841f41"} Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.924190 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-d874db4c8-mzdgt" event={"ID":"2fc43285-4e2b-4d05-b52b-b446b200723e","Type":"ContainerDied","Data":"b522c9910772b6a8384977365882529f9115bb7a4d5911765cda11f2058011c4"} Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.924495 4854 scope.go:117] "RemoveContainer" containerID="51322ecde32cacf588dbcaa0a975a0737f7f0345ccb707018ce0564a3db05c19" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.924640 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-d874db4c8-mzdgt" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.928169 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.928159761 podStartE2EDuration="6.928159761s" podCreationTimestamp="2025-11-25 09:59:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:00.926847224 +0000 UTC m=+1406.779840630" watchObservedRunningTime="2025-11-25 10:00:00.928159761 +0000 UTC m=+1406.781153137" Nov 25 10:00:00 crc kubenswrapper[4854]: I1125 10:00:00.961453 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"953a22a5-3c0c-402b-a4e1-35dfcea8f92f","Type":"ContainerStarted","Data":"029eba2d51647076fe3de8154a5f94f47e88b13af9677ba86bc91cf88ddc2eff"} Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.028329 4854 generic.go:334] "Generic (PLEG): container finished" podID="ffe5bf34-995f-4ee5-a067-cea929353182" containerID="9811aaa30222aa125f7b74099ab6a86f6e540741251fe8c6228266a7f55abe45" exitCode=2 Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.028371 4854 generic.go:334] "Generic (PLEG): container finished" podID="ffe5bf34-995f-4ee5-a067-cea929353182" containerID="47f91c70fed023e2e3bd5bae11818129ba2c9915e4a2b5ab94fd18c983bfab81" exitCode=0 Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.037360 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.037334422 podStartE2EDuration="7.037334422s" podCreationTimestamp="2025-11-25 09:59:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:01.004345609 +0000 UTC m=+1406.857339005" watchObservedRunningTime="2025-11-25 10:00:01.037334422 +0000 UTC m=+1406.890327798" Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.056167 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db0965e6-0374-4b14-875e-8557c346815e" path="/var/lib/kubelet/pods/db0965e6-0374-4b14-875e-8557c346815e/volumes" Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.056926 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerDied","Data":"9811aaa30222aa125f7b74099ab6a86f6e540741251fe8c6228266a7f55abe45"} Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.056956 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerDied","Data":"47f91c70fed023e2e3bd5bae11818129ba2c9915e4a2b5ab94fd18c983bfab81"} Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.079567 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-d874db4c8-mzdgt"] Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.097643 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-d874db4c8-mzdgt"] Nov 25 10:00:01 crc kubenswrapper[4854]: E1125 10:00:01.339817 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" 
cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:00:01 crc kubenswrapper[4854]: E1125 10:00:01.356162 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:00:01 crc kubenswrapper[4854]: E1125 10:00:01.367888 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:00:01 crc kubenswrapper[4854]: E1125 10:00:01.367981 4854 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-795866c477-qft6s" podUID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" containerName="heat-engine" Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.772511 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj"] Nov 25 10:00:01 crc kubenswrapper[4854]: I1125 10:00:01.906539 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dr9x4"] Nov 25 10:00:02 crc kubenswrapper[4854]: I1125 10:00:02.100887 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" event={"ID":"d61e667f-63aa-47e8-b22e-4a515dc5d81d","Type":"ContainerStarted","Data":"dc63d69a1265ce1f9ddf674f6471dc6bac0c298576eb80993a98637a3e03fa72"} Nov 25 10:00:02 crc kubenswrapper[4854]: I1125 10:00:02.133989 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7pf8" event={"ID":"180d6ba0-1592-4d2a-be8f-c121b6bf618c","Type":"ContainerStarted","Data":"2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec"} Nov 25 10:00:02 crc kubenswrapper[4854]: I1125 10:00:02.162835 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dr9x4" event={"ID":"a05bbad4-447a-452c-8254-d2f0a2ea9fdb","Type":"ContainerStarted","Data":"fd3e2167170f10650f9a064b0176a399be4f9974c96c9c227c669f2100fc7c14"} Nov 25 10:00:02 crc kubenswrapper[4854]: I1125 10:00:02.207022 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k7pf8" podStartSLOduration=4.277882254 podStartE2EDuration="9.206997693s" podCreationTimestamp="2025-11-25 09:59:53 +0000 UTC" firstStartedPulling="2025-11-25 09:59:55.368718392 +0000 UTC m=+1401.221711768" lastFinishedPulling="2025-11-25 10:00:00.297833831 +0000 UTC m=+1406.150827207" observedRunningTime="2025-11-25 10:00:02.17433236 +0000 UTC m=+1408.027325736" watchObservedRunningTime="2025-11-25 10:00:02.206997693 +0000 UTC m=+1408.059991069" Nov 25 10:00:02 crc kubenswrapper[4854]: I1125 10:00:02.612057 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xgtsh" Nov 25 10:00:02 crc kubenswrapper[4854]: I1125 10:00:02.675930 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/community-operators-xgtsh" Nov 25 10:00:03 crc kubenswrapper[4854]: I1125 10:00:03.042488 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fc43285-4e2b-4d05-b52b-b446b200723e" path="/var/lib/kubelet/pods/2fc43285-4e2b-4d05-b52b-b446b200723e/volumes" Nov 25 10:00:03 crc kubenswrapper[4854]: I1125 10:00:03.199531 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" event={"ID":"d61e667f-63aa-47e8-b22e-4a515dc5d81d","Type":"ContainerStarted","Data":"3a0671fde0f7292553d1434d0381b224c344f383e5cd575a0444463cc7cbf14c"} Nov 25 10:00:03 crc kubenswrapper[4854]: I1125 10:00:03.250567 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" podStartSLOduration=3.250528146 podStartE2EDuration="3.250528146s" podCreationTimestamp="2025-11-25 10:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:03.232723233 +0000 UTC m=+1409.085716609" watchObservedRunningTime="2025-11-25 10:00:03.250528146 +0000 UTC m=+1409.103521522" Nov 25 10:00:03 crc kubenswrapper[4854]: I1125 10:00:03.612786 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 10:00:03 crc kubenswrapper[4854]: I1125 10:00:03.612939 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 10:00:03 crc kubenswrapper[4854]: I1125 10:00:03.705914 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 10:00:04 crc kubenswrapper[4854]: I1125 10:00:04.230434 4854 generic.go:334] "Generic (PLEG): container finished" podID="d61e667f-63aa-47e8-b22e-4a515dc5d81d" containerID="3a0671fde0f7292553d1434d0381b224c344f383e5cd575a0444463cc7cbf14c" exitCode=0 Nov 25 10:00:04 crc kubenswrapper[4854]: I1125 10:00:04.230849 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" event={"ID":"d61e667f-63aa-47e8-b22e-4a515dc5d81d","Type":"ContainerDied","Data":"3a0671fde0f7292553d1434d0381b224c344f383e5cd575a0444463cc7cbf14c"} Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.078578 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.078616 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.156314 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.192024 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.263916 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.263958 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: 
I1125 10:00:05.347634 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.565943 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.565993 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.631872 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.657613 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.760079 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.893316 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d61e667f-63aa-47e8-b22e-4a515dc5d81d-secret-volume\") pod \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.893427 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d61e667f-63aa-47e8-b22e-4a515dc5d81d-config-volume\") pod \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.893697 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqzzp\" (UniqueName: \"kubernetes.io/projected/d61e667f-63aa-47e8-b22e-4a515dc5d81d-kube-api-access-qqzzp\") pod \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\" (UID: \"d61e667f-63aa-47e8-b22e-4a515dc5d81d\") " Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.895340 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d61e667f-63aa-47e8-b22e-4a515dc5d81d-config-volume" (OuterVolumeSpecName: "config-volume") pod "d61e667f-63aa-47e8-b22e-4a515dc5d81d" (UID: "d61e667f-63aa-47e8-b22e-4a515dc5d81d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.917528 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d61e667f-63aa-47e8-b22e-4a515dc5d81d-kube-api-access-qqzzp" (OuterVolumeSpecName: "kube-api-access-qqzzp") pod "d61e667f-63aa-47e8-b22e-4a515dc5d81d" (UID: "d61e667f-63aa-47e8-b22e-4a515dc5d81d"). InnerVolumeSpecName "kube-api-access-qqzzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.921415 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d61e667f-63aa-47e8-b22e-4a515dc5d81d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d61e667f-63aa-47e8-b22e-4a515dc5d81d" (UID: "d61e667f-63aa-47e8-b22e-4a515dc5d81d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.998925 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqzzp\" (UniqueName: \"kubernetes.io/projected/d61e667f-63aa-47e8-b22e-4a515dc5d81d-kube-api-access-qqzzp\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.999222 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d61e667f-63aa-47e8-b22e-4a515dc5d81d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:05 crc kubenswrapper[4854]: I1125 10:00:05.999335 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d61e667f-63aa-47e8-b22e-4a515dc5d81d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:06 crc kubenswrapper[4854]: I1125 10:00:06.282703 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" Nov 25 10:00:06 crc kubenswrapper[4854]: I1125 10:00:06.284857 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj" event={"ID":"d61e667f-63aa-47e8-b22e-4a515dc5d81d","Type":"ContainerDied","Data":"dc63d69a1265ce1f9ddf674f6471dc6bac0c298576eb80993a98637a3e03fa72"} Nov 25 10:00:06 crc kubenswrapper[4854]: I1125 10:00:06.284902 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc63d69a1265ce1f9ddf674f6471dc6bac0c298576eb80993a98637a3e03fa72" Nov 25 10:00:06 crc kubenswrapper[4854]: I1125 10:00:06.286240 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:06 crc kubenswrapper[4854]: I1125 10:00:06.286276 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:06 crc kubenswrapper[4854]: I1125 10:00:06.625257 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xgtsh"] Nov 25 10:00:06 crc kubenswrapper[4854]: I1125 10:00:06.625647 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xgtsh" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="registry-server" containerID="cri-o://25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af" gracePeriod=2 Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.288388 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xgtsh" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.300054 4854 generic.go:334] "Generic (PLEG): container finished" podID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerID="25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af" exitCode=0 Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.300128 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgtsh" event={"ID":"3405ae80-37c0-433b-99aa-f9e233d61d86","Type":"ContainerDied","Data":"25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af"} Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.300203 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xgtsh" event={"ID":"3405ae80-37c0-433b-99aa-f9e233d61d86","Type":"ContainerDied","Data":"f12ac0894e63c6887007774d3cd9777e62797e66bff40c1f69102ff24c419fec"} Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.300226 4854 scope.go:117] "RemoveContainer" containerID="25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.300151 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xgtsh" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.354510 4854 scope.go:117] "RemoveContainer" containerID="1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.388127 4854 scope.go:117] "RemoveContainer" containerID="71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.438283 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-utilities\") pod \"3405ae80-37c0-433b-99aa-f9e233d61d86\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.438550 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-catalog-content\") pod \"3405ae80-37c0-433b-99aa-f9e233d61d86\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.438605 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pprf7\" (UniqueName: \"kubernetes.io/projected/3405ae80-37c0-433b-99aa-f9e233d61d86-kube-api-access-pprf7\") pod \"3405ae80-37c0-433b-99aa-f9e233d61d86\" (UID: \"3405ae80-37c0-433b-99aa-f9e233d61d86\") " Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.440711 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-utilities" (OuterVolumeSpecName: "utilities") pod "3405ae80-37c0-433b-99aa-f9e233d61d86" (UID: "3405ae80-37c0-433b-99aa-f9e233d61d86"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.441377 4854 scope.go:117] "RemoveContainer" containerID="25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af" Nov 25 10:00:07 crc kubenswrapper[4854]: E1125 10:00:07.441958 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af\": container with ID starting with 25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af not found: ID does not exist" containerID="25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.441986 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af"} err="failed to get container status \"25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af\": rpc error: code = NotFound desc = could not find container \"25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af\": container with ID starting with 25ca0b9ef2c380451ae442759fa41190958b15a48e7eb1eae65d04f5d58604af not found: ID does not exist" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.442018 4854 scope.go:117] "RemoveContainer" containerID="1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d" Nov 25 10:00:07 crc kubenswrapper[4854]: E1125 10:00:07.442193 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d\": container with ID starting with 1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d not found: ID does not exist" containerID="1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.442208 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d"} err="failed to get container status \"1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d\": rpc error: code = NotFound desc = could not find container \"1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d\": container with ID starting with 1a50e6d236861d931132b441ad8699eed634e496fe5fcc4a0898dd9d7b9cac0d not found: ID does not exist" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.442220 4854 scope.go:117] "RemoveContainer" containerID="71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155" Nov 25 10:00:07 crc kubenswrapper[4854]: E1125 10:00:07.442372 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155\": container with ID starting with 71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155 not found: ID does not exist" containerID="71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.442394 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155"} err="failed to get container status \"71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155\": rpc error: code = NotFound desc = could not 
find container \"71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155\": container with ID starting with 71492cd3499f60b83df0efaac46c3197a38d6dc4790c49128b37ea8e68cdd155 not found: ID does not exist" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.446506 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3405ae80-37c0-433b-99aa-f9e233d61d86-kube-api-access-pprf7" (OuterVolumeSpecName: "kube-api-access-pprf7") pod "3405ae80-37c0-433b-99aa-f9e233d61d86" (UID: "3405ae80-37c0-433b-99aa-f9e233d61d86"). InnerVolumeSpecName "kube-api-access-pprf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.509400 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3405ae80-37c0-433b-99aa-f9e233d61d86" (UID: "3405ae80-37c0-433b-99aa-f9e233d61d86"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.541955 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.541990 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3405ae80-37c0-433b-99aa-f9e233d61d86-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.542003 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pprf7\" (UniqueName: \"kubernetes.io/projected/3405ae80-37c0-433b-99aa-f9e233d61d86-kube-api-access-pprf7\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.638406 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xgtsh"] Nov 25 10:00:07 crc kubenswrapper[4854]: I1125 10:00:07.677933 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xgtsh"] Nov 25 10:00:08 crc kubenswrapper[4854]: I1125 10:00:08.672392 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:08 crc kubenswrapper[4854]: I1125 10:00:08.672851 4854 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:00:08 crc kubenswrapper[4854]: I1125 10:00:08.694301 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:00:08 crc kubenswrapper[4854]: I1125 10:00:08.694635 4854 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:00:08 crc kubenswrapper[4854]: I1125 10:00:08.699831 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:00:08 crc kubenswrapper[4854]: I1125 10:00:08.702878 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:00:09 crc kubenswrapper[4854]: I1125 10:00:09.040789 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" path="/var/lib/kubelet/pods/3405ae80-37c0-433b-99aa-f9e233d61d86/volumes" Nov 25 
10:00:11 crc kubenswrapper[4854]: E1125 10:00:11.329232 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:00:11 crc kubenswrapper[4854]: E1125 10:00:11.332651 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:00:11 crc kubenswrapper[4854]: E1125 10:00:11.336595 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:00:11 crc kubenswrapper[4854]: E1125 10:00:11.336721 4854 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-795866c477-qft6s" podUID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" containerName="heat-engine" Nov 25 10:00:11 crc kubenswrapper[4854]: I1125 10:00:11.420258 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k7pf8"] Nov 25 10:00:11 crc kubenswrapper[4854]: I1125 10:00:11.420568 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k7pf8" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="registry-server" containerID="cri-o://2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec" gracePeriod=2 Nov 25 10:00:12 crc kubenswrapper[4854]: I1125 10:00:12.385904 4854 generic.go:334] "Generic (PLEG): container finished" podID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerID="2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec" exitCode=0 Nov 25 10:00:12 crc kubenswrapper[4854]: I1125 10:00:12.386102 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7pf8" event={"ID":"180d6ba0-1592-4d2a-be8f-c121b6bf618c","Type":"ContainerDied","Data":"2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec"} Nov 25 10:00:12 crc kubenswrapper[4854]: I1125 10:00:12.387994 4854 generic.go:334] "Generic (PLEG): container finished" podID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" exitCode=0 Nov 25 10:00:12 crc kubenswrapper[4854]: I1125 10:00:12.388036 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-795866c477-qft6s" event={"ID":"fa6d4a82-7a62-446e-bd21-394a6ef687c1","Type":"ContainerDied","Data":"187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684"} Nov 25 10:00:13 crc kubenswrapper[4854]: I1125 10:00:13.403847 4854 generic.go:334] "Generic (PLEG): container finished" podID="ffe5bf34-995f-4ee5-a067-cea929353182" containerID="54528e11ec186c07e1c21454810fdc80e8d8671e4a47f5d5fbe7695989b5cdee" exitCode=0 Nov 25 10:00:13 crc kubenswrapper[4854]: I1125 10:00:13.404025 4854 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerDied","Data":"54528e11ec186c07e1c21454810fdc80e8d8671e4a47f5d5fbe7695989b5cdee"} Nov 25 10:00:13 crc kubenswrapper[4854]: E1125 10:00:13.612322 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec is running failed: container process not found" containerID="2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 10:00:13 crc kubenswrapper[4854]: E1125 10:00:13.613025 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec is running failed: container process not found" containerID="2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 10:00:13 crc kubenswrapper[4854]: E1125 10:00:13.613523 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec is running failed: container process not found" containerID="2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 10:00:13 crc kubenswrapper[4854]: E1125 10:00:13.613566 4854 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-k7pf8" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="registry-server" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.434012 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7pf8" event={"ID":"180d6ba0-1592-4d2a-be8f-c121b6bf618c","Type":"ContainerDied","Data":"620e7ec9905747b78dd214fa569060c718a753467af80ed442450ad884c82164"} Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.434525 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="620e7ec9905747b78dd214fa569060c718a753467af80ed442450ad884c82164" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.435561 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-795866c477-qft6s" event={"ID":"fa6d4a82-7a62-446e-bd21-394a6ef687c1","Type":"ContainerDied","Data":"76ddaf799a261b67eeb26ea288e56906d54ac8b7220c1da1464bde02c7346207"} Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.435580 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76ddaf799a261b67eeb26ea288e56906d54ac8b7220c1da1464bde02c7346207" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.447615 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.459003 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-795866c477-qft6s" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.552481 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-combined-ca-bundle\") pod \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.553404 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k2k8\" (UniqueName: \"kubernetes.io/projected/180d6ba0-1592-4d2a-be8f-c121b6bf618c-kube-api-access-2k2k8\") pod \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.553537 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45x6q\" (UniqueName: \"kubernetes.io/projected/fa6d4a82-7a62-446e-bd21-394a6ef687c1-kube-api-access-45x6q\") pod \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.553830 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-catalog-content\") pod \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.553956 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-utilities\") pod \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\" (UID: \"180d6ba0-1592-4d2a-be8f-c121b6bf618c\") " Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.554103 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data-custom\") pod \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.554225 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data\") pod \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\" (UID: \"fa6d4a82-7a62-446e-bd21-394a6ef687c1\") " Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.554406 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-utilities" (OuterVolumeSpecName: "utilities") pod "180d6ba0-1592-4d2a-be8f-c121b6bf618c" (UID: "180d6ba0-1592-4d2a-be8f-c121b6bf618c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.555193 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.559871 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fa6d4a82-7a62-446e-bd21-394a6ef687c1" (UID: "fa6d4a82-7a62-446e-bd21-394a6ef687c1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.576715 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa6d4a82-7a62-446e-bd21-394a6ef687c1-kube-api-access-45x6q" (OuterVolumeSpecName: "kube-api-access-45x6q") pod "fa6d4a82-7a62-446e-bd21-394a6ef687c1" (UID: "fa6d4a82-7a62-446e-bd21-394a6ef687c1"). InnerVolumeSpecName "kube-api-access-45x6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.581446 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/180d6ba0-1592-4d2a-be8f-c121b6bf618c-kube-api-access-2k2k8" (OuterVolumeSpecName: "kube-api-access-2k2k8") pod "180d6ba0-1592-4d2a-be8f-c121b6bf618c" (UID: "180d6ba0-1592-4d2a-be8f-c121b6bf618c"). InnerVolumeSpecName "kube-api-access-2k2k8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.616737 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "180d6ba0-1592-4d2a-be8f-c121b6bf618c" (UID: "180d6ba0-1592-4d2a-be8f-c121b6bf618c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.640148 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data" (OuterVolumeSpecName: "config-data") pod "fa6d4a82-7a62-446e-bd21-394a6ef687c1" (UID: "fa6d4a82-7a62-446e-bd21-394a6ef687c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.645758 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa6d4a82-7a62-446e-bd21-394a6ef687c1" (UID: "fa6d4a82-7a62-446e-bd21-394a6ef687c1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.657765 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k2k8\" (UniqueName: \"kubernetes.io/projected/180d6ba0-1592-4d2a-be8f-c121b6bf618c-kube-api-access-2k2k8\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.657808 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45x6q\" (UniqueName: \"kubernetes.io/projected/fa6d4a82-7a62-446e-bd21-394a6ef687c1-kube-api-access-45x6q\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.657818 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/180d6ba0-1592-4d2a-be8f-c121b6bf618c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.657827 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.657839 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:15 crc kubenswrapper[4854]: I1125 10:00:15.657848 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa6d4a82-7a62-446e-bd21-394a6ef687c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.455929 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k7pf8" Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.455923 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dr9x4" event={"ID":"a05bbad4-447a-452c-8254-d2f0a2ea9fdb","Type":"ContainerStarted","Data":"dbb617cf9bad852c619e8a70e79b8d94465b864ab151f06fa54fade7929f52f1"} Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.456038 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-795866c477-qft6s" Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.488728 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-dr9x4" podStartSLOduration=3.060437477 podStartE2EDuration="16.48870794s" podCreationTimestamp="2025-11-25 10:00:00 +0000 UTC" firstStartedPulling="2025-11-25 10:00:01.871957574 +0000 UTC m=+1407.724950950" lastFinishedPulling="2025-11-25 10:00:15.300228037 +0000 UTC m=+1421.153221413" observedRunningTime="2025-11-25 10:00:16.479237108 +0000 UTC m=+1422.332230484" watchObservedRunningTime="2025-11-25 10:00:16.48870794 +0000 UTC m=+1422.341701316" Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.519925 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-795866c477-qft6s"] Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.534925 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-795866c477-qft6s"] Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.550610 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k7pf8"] Nov 25 10:00:16 crc kubenswrapper[4854]: I1125 10:00:16.563743 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k7pf8"] Nov 25 10:00:17 crc kubenswrapper[4854]: I1125 10:00:17.029637 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" path="/var/lib/kubelet/pods/180d6ba0-1592-4d2a-be8f-c121b6bf618c/volumes" Nov 25 10:00:17 crc kubenswrapper[4854]: I1125 10:00:17.030289 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" path="/var/lib/kubelet/pods/fa6d4a82-7a62-446e-bd21-394a6ef687c1/volumes" Nov 25 10:00:20 crc kubenswrapper[4854]: I1125 10:00:20.958464 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 10:00:22 crc kubenswrapper[4854]: I1125 10:00:22.669643 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.190:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:00:22 crc kubenswrapper[4854]: I1125 10:00:22.669739 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="c67dbcea-a3b9-46ac-833c-97595c61756e" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.190:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:00:22 crc kubenswrapper[4854]: I1125 10:00:22.966453 4854 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podb8f14245-4267-4921-996d-6d192b4c9953"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podb8f14245-4267-4921-996d-6d192b4c9953] : Timed out while waiting for systemd to remove kubepods-besteffort-podb8f14245_4267_4921_996d_6d192b4c9953.slice" Nov 25 10:00:22 crc kubenswrapper[4854]: E1125 10:00:22.966837 4854 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podb8f14245-4267-4921-996d-6d192b4c9953] : unable to destroy cgroup paths for cgroup [kubepods besteffort podb8f14245-4267-4921-996d-6d192b4c9953] : Timed out while waiting for systemd to remove kubepods-besteffort-podb8f14245_4267_4921_996d_6d192b4c9953.slice" pod="openstack/heat-api-55dc74c94f-t88f4" podUID="b8f14245-4267-4921-996d-6d192b4c9953" Nov 25 10:00:22 crc kubenswrapper[4854]: I1125 10:00:22.972147 4854 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod964f5abd-0c6c-47cf-82ca-ea31aaf2b522"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod964f5abd-0c6c-47cf-82ca-ea31aaf2b522] : Timed out while waiting for systemd to remove kubepods-besteffort-pod964f5abd_0c6c_47cf_82ca_ea31aaf2b522.slice" Nov 25 10:00:22 crc kubenswrapper[4854]: E1125 10:00:22.972193 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod964f5abd-0c6c-47cf-82ca-ea31aaf2b522] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod964f5abd-0c6c-47cf-82ca-ea31aaf2b522] : Timed out while waiting for systemd to remove kubepods-besteffort-pod964f5abd_0c6c_47cf_82ca_ea31aaf2b522.slice" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" Nov 25 10:00:23 crc kubenswrapper[4854]: I1125 10:00:23.049589 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6d5696cb69-gnxts" Nov 25 10:00:23 crc kubenswrapper[4854]: I1125 10:00:23.049606 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-55dc74c94f-t88f4" Nov 25 10:00:23 crc kubenswrapper[4854]: I1125 10:00:23.087509 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-55dc74c94f-t88f4"] Nov 25 10:00:23 crc kubenswrapper[4854]: I1125 10:00:23.099183 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-55dc74c94f-t88f4"] Nov 25 10:00:23 crc kubenswrapper[4854]: I1125 10:00:23.109852 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6d5696cb69-gnxts"] Nov 25 10:00:23 crc kubenswrapper[4854]: I1125 10:00:23.120562 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-6d5696cb69-gnxts"] Nov 25 10:00:25 crc kubenswrapper[4854]: I1125 10:00:25.029469 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="964f5abd-0c6c-47cf-82ca-ea31aaf2b522" path="/var/lib/kubelet/pods/964f5abd-0c6c-47cf-82ca-ea31aaf2b522/volumes" Nov 25 10:00:25 crc kubenswrapper[4854]: I1125 10:00:25.031549 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8f14245-4267-4921-996d-6d192b4c9953" path="/var/lib/kubelet/pods/b8f14245-4267-4921-996d-6d192b4c9953/volumes" Nov 25 10:00:26 crc kubenswrapper[4854]: I1125 10:00:26.080554 4854 generic.go:334] "Generic (PLEG): container finished" podID="a05bbad4-447a-452c-8254-d2f0a2ea9fdb" containerID="dbb617cf9bad852c619e8a70e79b8d94465b864ab151f06fa54fade7929f52f1" exitCode=0 Nov 25 10:00:26 crc kubenswrapper[4854]: I1125 10:00:26.080891 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dr9x4" event={"ID":"a05bbad4-447a-452c-8254-d2f0a2ea9fdb","Type":"ContainerDied","Data":"dbb617cf9bad852c619e8a70e79b8d94465b864ab151f06fa54fade7929f52f1"} Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 
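The cgroup errors above pair a pod UID with the systemd slice kubelet timed out waiting on; as the records themselves show, the slice name is the UID with dashes replaced by underscores, embedded in a kubepods-besteffort-pod...slice pattern. A tiny illustrative helper (the function name is mine) for deriving the slice to inspect with systemctl:

```go
package main

import (
	"fmt"
	"strings"
)

// besteffortSlice mirrors the naming visible in the log: pod UID
// b8f14245-4267-4921-996d-6d192b4c9953 maps to the transient slice
// kubepods-besteffort-podb8f14245_4267_4921_996d_6d192b4c9953.slice.
func besteffortSlice(podUID string) string {
	return "kubepods-besteffort-pod" + strings.ReplaceAll(podUID, "-", "_") + ".slice"
}

func main() {
	fmt.Println(besteffortSlice("b8f14245-4267-4921-996d-6d192b4c9953"))
}
```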
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.531051 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dr9x4"
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.671333 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-config-data\") pod \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") "
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.671385 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qqdx\" (UniqueName: \"kubernetes.io/projected/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-kube-api-access-6qqdx\") pod \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") "
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.671455 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-combined-ca-bundle\") pod \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") "
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.671551 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-scripts\") pod \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\" (UID: \"a05bbad4-447a-452c-8254-d2f0a2ea9fdb\") "
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.677548 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-kube-api-access-6qqdx" (OuterVolumeSpecName: "kube-api-access-6qqdx") pod "a05bbad4-447a-452c-8254-d2f0a2ea9fdb" (UID: "a05bbad4-447a-452c-8254-d2f0a2ea9fdb"). InnerVolumeSpecName "kube-api-access-6qqdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.680472 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-scripts" (OuterVolumeSpecName: "scripts") pod "a05bbad4-447a-452c-8254-d2f0a2ea9fdb" (UID: "a05bbad4-447a-452c-8254-d2f0a2ea9fdb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.707759 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a05bbad4-447a-452c-8254-d2f0a2ea9fdb" (UID: "a05bbad4-447a-452c-8254-d2f0a2ea9fdb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.716386 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-config-data" (OuterVolumeSpecName: "config-data") pod "a05bbad4-447a-452c-8254-d2f0a2ea9fdb" (UID: "a05bbad4-447a-452c-8254-d2f0a2ea9fdb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.774204 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.774249 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.774266 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qqdx\" (UniqueName: \"kubernetes.io/projected/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-kube-api-access-6qqdx\") on node \"crc\" DevicePath \"\""
Nov 25 10:00:27 crc kubenswrapper[4854]: I1125 10:00:27.774280 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a05bbad4-447a-452c-8254-d2f0a2ea9fdb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.106088 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dr9x4" event={"ID":"a05bbad4-447a-452c-8254-d2f0a2ea9fdb","Type":"ContainerDied","Data":"fd3e2167170f10650f9a064b0176a399be4f9974c96c9c227c669f2100fc7c14"}
Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.106133 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd3e2167170f10650f9a064b0176a399be4f9974c96c9c227c669f2100fc7c14"
Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.106150 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dr9x4"
Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.216872 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217419 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="registry-server"
Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217437 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="registry-server"
Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217451 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="extract-utilities"
Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217457 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="extract-utilities"
Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217469 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="registry-server"
Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217475 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="registry-server"
Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217483 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61e667f-63aa-47e8-b22e-4a515dc5d81d" containerName="collect-profiles"
podUID="d61e667f-63aa-47e8-b22e-4a515dc5d81d" containerName="collect-profiles" Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217507 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a05bbad4-447a-452c-8254-d2f0a2ea9fdb" containerName="nova-cell0-conductor-db-sync" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217513 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a05bbad4-447a-452c-8254-d2f0a2ea9fdb" containerName="nova-cell0-conductor-db-sync" Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217531 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" containerName="heat-engine" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217537 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" containerName="heat-engine" Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217548 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="extract-utilities" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217554 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="extract-utilities" Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217576 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="extract-content" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217593 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="extract-content" Nov 25 10:00:28 crc kubenswrapper[4854]: E1125 10:00:28.217619 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="extract-content" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217626 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="extract-content" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217852 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d61e667f-63aa-47e8-b22e-4a515dc5d81d" containerName="collect-profiles" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217864 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="3405ae80-37c0-433b-99aa-f9e233d61d86" containerName="registry-server" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217882 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="180d6ba0-1592-4d2a-be8f-c121b6bf618c" containerName="registry-server" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217894 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa6d4a82-7a62-446e-bd21-394a6ef687c1" containerName="heat-engine" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.217904 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a05bbad4-447a-452c-8254-d2f0a2ea9fdb" containerName="nova-cell0-conductor-db-sync" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.218883 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.220992 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.221599 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-7mwfx" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.265896 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.387534 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d267893d-1cd2-420b-a2ad-b95f2e2729e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.388054 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d267893d-1cd2-420b-a2ad-b95f2e2729e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.388131 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8trm\" (UniqueName: \"kubernetes.io/projected/d267893d-1cd2-420b-a2ad-b95f2e2729e1-kube-api-access-j8trm\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.490158 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d267893d-1cd2-420b-a2ad-b95f2e2729e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.490418 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d267893d-1cd2-420b-a2ad-b95f2e2729e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.490505 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8trm\" (UniqueName: \"kubernetes.io/projected/d267893d-1cd2-420b-a2ad-b95f2e2729e1-kube-api-access-j8trm\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.496521 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d267893d-1cd2-420b-a2ad-b95f2e2729e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.498484 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d267893d-1cd2-420b-a2ad-b95f2e2729e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.506443 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8trm\" (UniqueName: \"kubernetes.io/projected/d267893d-1cd2-420b-a2ad-b95f2e2729e1-kube-api-access-j8trm\") pod \"nova-cell0-conductor-0\" (UID: \"d267893d-1cd2-420b-a2ad-b95f2e2729e1\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:28 crc kubenswrapper[4854]: I1125 10:00:28.574104 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:29 crc kubenswrapper[4854]: I1125 10:00:29.051320 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:00:29 crc kubenswrapper[4854]: I1125 10:00:29.123817 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d267893d-1cd2-420b-a2ad-b95f2e2729e1","Type":"ContainerStarted","Data":"45f5a849dd92a2d9e29775848c659aca6431bc73aed051725b54726a1ea74311"} Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.153240 4854 generic.go:334] "Generic (PLEG): container finished" podID="ffe5bf34-995f-4ee5-a067-cea929353182" containerID="15ec4b0784e877ae274026289ab0d6762159e4676f2290f939a6caf0583ce7f5" exitCode=137 Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.153430 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerDied","Data":"15ec4b0784e877ae274026289ab0d6762159e4676f2290f939a6caf0583ce7f5"} Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.155793 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d267893d-1cd2-420b-a2ad-b95f2e2729e1","Type":"ContainerStarted","Data":"35ae3a6765ffe9dce89791457efbc527917f77cd735b7dc961a5f49054cfed06"} Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.155957 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.180921 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.180898066 podStartE2EDuration="2.180898066s" podCreationTimestamp="2025-11-25 10:00:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:30.174208631 +0000 UTC m=+1436.027202027" watchObservedRunningTime="2025-11-25 10:00:30.180898066 +0000 UTC m=+1436.033891442" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.225744 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-tg6kv"] Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.227706 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.240088 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-96f1-account-create-2dr75"] Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.242006 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.247321 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.263074 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a16432da-436b-4d4c-b383-e009ff4a4ff6-operator-scripts\") pod \"aodh-db-create-tg6kv\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.263227 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79wdk\" (UniqueName: \"kubernetes.io/projected/a16432da-436b-4d4c-b383-e009ff4a4ff6-kube-api-access-79wdk\") pod \"aodh-db-create-tg6kv\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.281013 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-tg6kv"] Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.312264 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-96f1-account-create-2dr75"] Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.365208 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94d00e2e-b111-4b29-8ba5-aca5ed86028a-operator-scripts\") pod \"aodh-96f1-account-create-2dr75\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.365319 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hcsn\" (UniqueName: \"kubernetes.io/projected/94d00e2e-b111-4b29-8ba5-aca5ed86028a-kube-api-access-5hcsn\") pod \"aodh-96f1-account-create-2dr75\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.365389 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a16432da-436b-4d4c-b383-e009ff4a4ff6-operator-scripts\") pod \"aodh-db-create-tg6kv\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.365589 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79wdk\" (UniqueName: \"kubernetes.io/projected/a16432da-436b-4d4c-b383-e009ff4a4ff6-kube-api-access-79wdk\") pod \"aodh-db-create-tg6kv\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.366980 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a16432da-436b-4d4c-b383-e009ff4a4ff6-operator-scripts\") pod \"aodh-db-create-tg6kv\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.387660 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79wdk\" (UniqueName: 
\"kubernetes.io/projected/a16432da-436b-4d4c-b383-e009ff4a4ff6-kube-api-access-79wdk\") pod \"aodh-db-create-tg6kv\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.443192 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.467730 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94d00e2e-b111-4b29-8ba5-aca5ed86028a-operator-scripts\") pod \"aodh-96f1-account-create-2dr75\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.467854 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hcsn\" (UniqueName: \"kubernetes.io/projected/94d00e2e-b111-4b29-8ba5-aca5ed86028a-kube-api-access-5hcsn\") pod \"aodh-96f1-account-create-2dr75\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.468493 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94d00e2e-b111-4b29-8ba5-aca5ed86028a-operator-scripts\") pod \"aodh-96f1-account-create-2dr75\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.485430 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hcsn\" (UniqueName: \"kubernetes.io/projected/94d00e2e-b111-4b29-8ba5-aca5ed86028a-kube-api-access-5hcsn\") pod \"aodh-96f1-account-create-2dr75\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.569145 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-scripts\") pod \"ffe5bf34-995f-4ee5-a067-cea929353182\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.569365 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-sg-core-conf-yaml\") pod \"ffe5bf34-995f-4ee5-a067-cea929353182\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.569403 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-config-data\") pod \"ffe5bf34-995f-4ee5-a067-cea929353182\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.569448 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-run-httpd\") pod \"ffe5bf34-995f-4ee5-a067-cea929353182\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.569590 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kklww\" (UniqueName: 
\"kubernetes.io/projected/ffe5bf34-995f-4ee5-a067-cea929353182-kube-api-access-kklww\") pod \"ffe5bf34-995f-4ee5-a067-cea929353182\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.569736 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-combined-ca-bundle\") pod \"ffe5bf34-995f-4ee5-a067-cea929353182\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.569803 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-log-httpd\") pod \"ffe5bf34-995f-4ee5-a067-cea929353182\" (UID: \"ffe5bf34-995f-4ee5-a067-cea929353182\") " Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.570645 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ffe5bf34-995f-4ee5-a067-cea929353182" (UID: "ffe5bf34-995f-4ee5-a067-cea929353182"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.570819 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ffe5bf34-995f-4ee5-a067-cea929353182" (UID: "ffe5bf34-995f-4ee5-a067-cea929353182"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.577126 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-scripts" (OuterVolumeSpecName: "scripts") pod "ffe5bf34-995f-4ee5-a067-cea929353182" (UID: "ffe5bf34-995f-4ee5-a067-cea929353182"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.577231 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffe5bf34-995f-4ee5-a067-cea929353182-kube-api-access-kklww" (OuterVolumeSpecName: "kube-api-access-kklww") pod "ffe5bf34-995f-4ee5-a067-cea929353182" (UID: "ffe5bf34-995f-4ee5-a067-cea929353182"). InnerVolumeSpecName "kube-api-access-kklww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.602535 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ffe5bf34-995f-4ee5-a067-cea929353182" (UID: "ffe5bf34-995f-4ee5-a067-cea929353182"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.672178 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.679872 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kklww\" (UniqueName: \"kubernetes.io/projected/ffe5bf34-995f-4ee5-a067-cea929353182-kube-api-access-kklww\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.679911 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.679924 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.679936 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.679947 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffe5bf34-995f-4ee5-a067-cea929353182-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.703195 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffe5bf34-995f-4ee5-a067-cea929353182" (UID: "ffe5bf34-995f-4ee5-a067-cea929353182"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.726851 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-config-data" (OuterVolumeSpecName: "config-data") pod "ffe5bf34-995f-4ee5-a067-cea929353182" (UID: "ffe5bf34-995f-4ee5-a067-cea929353182"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.741710 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.782454 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:30 crc kubenswrapper[4854]: I1125 10:00:30.782494 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe5bf34-995f-4ee5-a067-cea929353182-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.179025 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.180335 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffe5bf34-995f-4ee5-a067-cea929353182","Type":"ContainerDied","Data":"c92da635116b83da9d8ef2b84770ffb90bb13f5e8986abf04abe965dc1b89f6c"} Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.180382 4854 scope.go:117] "RemoveContainer" containerID="15ec4b0784e877ae274026289ab0d6762159e4676f2290f939a6caf0583ce7f5" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.245981 4854 scope.go:117] "RemoveContainer" containerID="9811aaa30222aa125f7b74099ab6a86f6e540741251fe8c6228266a7f55abe45" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.264625 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.299336 4854 scope.go:117] "RemoveContainer" containerID="47f91c70fed023e2e3bd5bae11818129ba2c9915e4a2b5ab94fd18c983bfab81" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.304178 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.318828 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:00:33 crc kubenswrapper[4854]: E1125 10:00:31.319362 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="proxy-httpd" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319377 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="proxy-httpd" Nov 25 10:00:33 crc kubenswrapper[4854]: E1125 10:00:31.319407 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-notification-agent" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319414 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-notification-agent" Nov 25 10:00:33 crc kubenswrapper[4854]: E1125 10:00:31.319436 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-central-agent" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319444 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-central-agent" Nov 25 10:00:33 crc kubenswrapper[4854]: E1125 10:00:31.319464 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="sg-core" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319471 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="sg-core" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319758 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-central-agent" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319780 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="sg-core" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319805 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="proxy-httpd" Nov 25 
10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.319825 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" containerName="ceilometer-notification-agent" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.325951 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.329719 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.331616 4854 scope.go:117] "RemoveContainer" containerID="54528e11ec186c07e1c21454810fdc80e8d8671e4a47f5d5fbe7695989b5cdee" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.337650 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.337848 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.365748 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-tg6kv"] Nov 25 10:00:33 crc kubenswrapper[4854]: W1125 10:00:31.387105 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec WatchSource:0}: Error finding container fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec: Status 404 returned error can't find the container with id fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.403940 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.404004 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqcdf\" (UniqueName: \"kubernetes.io/projected/10883200-ec1b-4bca-835d-a30490040779-kube-api-access-tqcdf\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.404197 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-scripts\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.404221 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.404387 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-run-httpd\") pod \"ceilometer-0\" (UID: 
\"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.404452 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-log-httpd\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.404618 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-config-data\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.506620 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-log-httpd\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.506660 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-config-data\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.506797 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.506843 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqcdf\" (UniqueName: \"kubernetes.io/projected/10883200-ec1b-4bca-835d-a30490040779-kube-api-access-tqcdf\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.507051 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-scripts\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.507103 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.507196 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-run-httpd\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.507982 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-run-httpd\") pod \"ceilometer-0\" (UID: 
\"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.508079 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-log-httpd\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.514319 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.516243 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-config-data\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.518961 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.523989 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-scripts\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.530339 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqcdf\" (UniqueName: \"kubernetes.io/projected/10883200-ec1b-4bca-835d-a30490040779-kube-api-access-tqcdf\") pod \"ceilometer-0\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:31.647213 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:32.192114 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tg6kv" event={"ID":"a16432da-436b-4d4c-b383-e009ff4a4ff6","Type":"ContainerStarted","Data":"28baefc9c156e4f2be20de528e25a47ed40d871854d383c7d33dcf83a162b73c"} Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:32.192631 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tg6kv" event={"ID":"a16432da-436b-4d4c-b383-e009ff4a4ff6","Type":"ContainerStarted","Data":"fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec"} Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:33.027317 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffe5bf34-995f-4ee5-a067-cea929353182" path="/var/lib/kubelet/pods/ffe5bf34-995f-4ee5-a067-cea929353182/volumes" Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:33.207796 4854 generic.go:334] "Generic (PLEG): container finished" podID="a16432da-436b-4d4c-b383-e009ff4a4ff6" containerID="28baefc9c156e4f2be20de528e25a47ed40d871854d383c7d33dcf83a162b73c" exitCode=0 Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:33.207877 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tg6kv" event={"ID":"a16432da-436b-4d4c-b383-e009ff4a4ff6","Type":"ContainerDied","Data":"28baefc9c156e4f2be20de528e25a47ed40d871854d383c7d33dcf83a162b73c"} Nov 25 10:00:33 crc kubenswrapper[4854]: W1125 10:00:33.521240 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94d00e2e_b111_4b29_8ba5_aca5ed86028a.slice/crio-3ca1a847b1d5ca708289c811c8e07fe0f7fcec97551fbacd73fd4cd9b4e0c01a WatchSource:0}: Error finding container 3ca1a847b1d5ca708289c811c8e07fe0f7fcec97551fbacd73fd4cd9b4e0c01a: Status 404 returned error can't find the container with id 3ca1a847b1d5ca708289c811c8e07fe0f7fcec97551fbacd73fd4cd9b4e0c01a Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:33.522050 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-96f1-account-create-2dr75"] Nov 25 10:00:33 crc kubenswrapper[4854]: I1125 10:00:33.539507 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.222099 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerStarted","Data":"9dbecd9c9833fc94168a42d2fdd91f45743710c2d291ff023bc8b6fa1611d0d6"} Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.226079 4854 generic.go:334] "Generic (PLEG): container finished" podID="94d00e2e-b111-4b29-8ba5-aca5ed86028a" containerID="99c0778f593da4ff167bce1ee9650bf267fd5b2bf236db328efee2554568c31f" exitCode=0 Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.226251 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-96f1-account-create-2dr75" event={"ID":"94d00e2e-b111-4b29-8ba5-aca5ed86028a","Type":"ContainerDied","Data":"99c0778f593da4ff167bce1ee9650bf267fd5b2bf236db328efee2554568c31f"} Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.226306 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-96f1-account-create-2dr75" event={"ID":"94d00e2e-b111-4b29-8ba5-aca5ed86028a","Type":"ContainerStarted","Data":"3ca1a847b1d5ca708289c811c8e07fe0f7fcec97551fbacd73fd4cd9b4e0c01a"} Nov 25 10:00:34 crc 
kubenswrapper[4854]: I1125 10:00:34.760981 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.793338 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79wdk\" (UniqueName: \"kubernetes.io/projected/a16432da-436b-4d4c-b383-e009ff4a4ff6-kube-api-access-79wdk\") pod \"a16432da-436b-4d4c-b383-e009ff4a4ff6\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.793690 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a16432da-436b-4d4c-b383-e009ff4a4ff6-operator-scripts\") pod \"a16432da-436b-4d4c-b383-e009ff4a4ff6\" (UID: \"a16432da-436b-4d4c-b383-e009ff4a4ff6\") " Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.794284 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a16432da-436b-4d4c-b383-e009ff4a4ff6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a16432da-436b-4d4c-b383-e009ff4a4ff6" (UID: "a16432da-436b-4d4c-b383-e009ff4a4ff6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.796963 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a16432da-436b-4d4c-b383-e009ff4a4ff6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.805958 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a16432da-436b-4d4c-b383-e009ff4a4ff6-kube-api-access-79wdk" (OuterVolumeSpecName: "kube-api-access-79wdk") pod "a16432da-436b-4d4c-b383-e009ff4a4ff6" (UID: "a16432da-436b-4d4c-b383-e009ff4a4ff6"). InnerVolumeSpecName "kube-api-access-79wdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:34 crc kubenswrapper[4854]: I1125 10:00:34.899561 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79wdk\" (UniqueName: \"kubernetes.io/projected/a16432da-436b-4d4c-b383-e009ff4a4ff6-kube-api-access-79wdk\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.251357 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-tg6kv" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.251343 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-tg6kv" event={"ID":"a16432da-436b-4d4c-b383-e009ff4a4ff6","Type":"ContainerDied","Data":"fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec"} Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.251458 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.252951 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerStarted","Data":"e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254"} Nov 25 10:00:35 crc kubenswrapper[4854]: E1125 10:00:35.582170 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache]" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.750430 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.824382 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hcsn\" (UniqueName: \"kubernetes.io/projected/94d00e2e-b111-4b29-8ba5-aca5ed86028a-kube-api-access-5hcsn\") pod \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.824621 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94d00e2e-b111-4b29-8ba5-aca5ed86028a-operator-scripts\") pod \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\" (UID: \"94d00e2e-b111-4b29-8ba5-aca5ed86028a\") " Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.825647 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94d00e2e-b111-4b29-8ba5-aca5ed86028a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "94d00e2e-b111-4b29-8ba5-aca5ed86028a" (UID: "94d00e2e-b111-4b29-8ba5-aca5ed86028a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.850575 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94d00e2e-b111-4b29-8ba5-aca5ed86028a-kube-api-access-5hcsn" (OuterVolumeSpecName: "kube-api-access-5hcsn") pod "94d00e2e-b111-4b29-8ba5-aca5ed86028a" (UID: "94d00e2e-b111-4b29-8ba5-aca5ed86028a"). InnerVolumeSpecName "kube-api-access-5hcsn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.927230 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hcsn\" (UniqueName: \"kubernetes.io/projected/94d00e2e-b111-4b29-8ba5-aca5ed86028a-kube-api-access-5hcsn\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:35 crc kubenswrapper[4854]: I1125 10:00:35.927268 4854 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94d00e2e-b111-4b29-8ba5-aca5ed86028a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:36 crc kubenswrapper[4854]: I1125 10:00:36.265165 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerStarted","Data":"7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428"} Nov 25 10:00:36 crc kubenswrapper[4854]: I1125 10:00:36.267405 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-96f1-account-create-2dr75" event={"ID":"94d00e2e-b111-4b29-8ba5-aca5ed86028a","Type":"ContainerDied","Data":"3ca1a847b1d5ca708289c811c8e07fe0f7fcec97551fbacd73fd4cd9b4e0c01a"} Nov 25 10:00:36 crc kubenswrapper[4854]: I1125 10:00:36.267448 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ca1a847b1d5ca708289c811c8e07fe0f7fcec97551fbacd73fd4cd9b4e0c01a" Nov 25 10:00:36 crc kubenswrapper[4854]: I1125 10:00:36.267502 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-96f1-account-create-2dr75" Nov 25 10:00:37 crc kubenswrapper[4854]: I1125 10:00:37.284902 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerStarted","Data":"84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142"} Nov 25 10:00:38 crc kubenswrapper[4854]: I1125 10:00:38.296718 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerStarted","Data":"17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed"} Nov 25 10:00:38 crc kubenswrapper[4854]: I1125 10:00:38.296884 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:00:38 crc kubenswrapper[4854]: I1125 10:00:38.323382 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.000265901 podStartE2EDuration="7.323360552s" podCreationTimestamp="2025-11-25 10:00:31 +0000 UTC" firstStartedPulling="2025-11-25 10:00:33.554593519 +0000 UTC m=+1439.407586895" lastFinishedPulling="2025-11-25 10:00:37.87768817 +0000 UTC m=+1443.730681546" observedRunningTime="2025-11-25 10:00:38.314007382 +0000 UTC m=+1444.167000758" watchObservedRunningTime="2025-11-25 10:00:38.323360552 +0000 UTC m=+1444.176353928" Nov 25 10:00:38 crc kubenswrapper[4854]: I1125 10:00:38.606864 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.254255 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-wg4tc"] Nov 25 10:00:39 crc kubenswrapper[4854]: E1125 10:00:39.254905 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a16432da-436b-4d4c-b383-e009ff4a4ff6" containerName="mariadb-database-create" 
Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.254932 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a16432da-436b-4d4c-b383-e009ff4a4ff6" containerName="mariadb-database-create" Nov 25 10:00:39 crc kubenswrapper[4854]: E1125 10:00:39.254990 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d00e2e-b111-4b29-8ba5-aca5ed86028a" containerName="mariadb-account-create" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.255000 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d00e2e-b111-4b29-8ba5-aca5ed86028a" containerName="mariadb-account-create" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.255273 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a16432da-436b-4d4c-b383-e009ff4a4ff6" containerName="mariadb-database-create" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.255318 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="94d00e2e-b111-4b29-8ba5-aca5ed86028a" containerName="mariadb-account-create" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.256275 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.259542 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.259886 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.272767 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-wg4tc"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.406377 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-scripts\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.406431 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.406473 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4rpb\" (UniqueName: \"kubernetes.io/projected/b3722d7b-30f5-414d-b947-9be5a8494449-kube-api-access-z4rpb\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.406551 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-config-data\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.511213 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-config-data\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.511542 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-scripts\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.511572 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.511605 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4rpb\" (UniqueName: \"kubernetes.io/projected/b3722d7b-30f5-414d-b947-9be5a8494449-kube-api-access-z4rpb\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.522257 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-scripts\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.528146 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.529791 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-config-data\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.538443 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4rpb\" (UniqueName: \"kubernetes.io/projected/b3722d7b-30f5-414d-b947-9be5a8494449-kube-api-access-z4rpb\") pod \"nova-cell0-cell-mapping-wg4tc\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.576641 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.709648 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.712160 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.718878 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.774951 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.776532 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.779259 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.817907 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.819947 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-config-data\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.819989 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9lxr\" (UniqueName: \"kubernetes.io/projected/812423ef-93ee-4ed5-80d9-6429fb098b06-kube-api-access-z9lxr\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.820014 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812423ef-93ee-4ed5-80d9-6429fb098b06-logs\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.820196 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.827541 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.837162 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.838725 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.896913 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.918057 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.925988 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq4dk\" (UniqueName: \"kubernetes.io/projected/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-kube-api-access-fq4dk\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.926083 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.926656 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-config-data\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.926743 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.926871 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-config-data\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.926897 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9lxr\" (UniqueName: \"kubernetes.io/projected/812423ef-93ee-4ed5-80d9-6429fb098b06-kube-api-access-z9lxr\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.926924 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812423ef-93ee-4ed5-80d9-6429fb098b06-logs\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.927589 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812423ef-93ee-4ed5-80d9-6429fb098b06-logs\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " 
pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.935805 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.946625 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-config-data\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.970970 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.972373 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9lxr\" (UniqueName: \"kubernetes.io/projected/812423ef-93ee-4ed5-80d9-6429fb098b06-kube-api-access-z9lxr\") pod \"nova-api-0\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " pod="openstack/nova-api-0" Nov 25 10:00:39 crc kubenswrapper[4854]: I1125 10:00:39.979468 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:39.986213 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.030559 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jbt4\" (UniqueName: \"kubernetes.io/projected/97e394d1-03c3-467a-84b3-daf51739d393-kube-api-access-2jbt4\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.030873 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.030940 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97e394d1-03c3-467a-84b3-daf51739d393-logs\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.031238 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq4dk\" (UniqueName: \"kubernetes.io/projected/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-kube-api-access-fq4dk\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.031280 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.031364 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-config-data\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.031413 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-config-data\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.033722 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.038228 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.081472 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq4dk\" (UniqueName: \"kubernetes.io/projected/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-kube-api-access-fq4dk\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.084781 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-77kl2"] Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.092170 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.098683 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-config-data\") pod \"nova-scheduler-0\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " pod="openstack/nova-scheduler-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.119735 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-77kl2"] Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.135174 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tthxv\" (UniqueName: \"kubernetes.io/projected/2784a16c-11ca-4472-bd50-f33dfb6f1086-kube-api-access-tthxv\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.138719 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jbt4\" (UniqueName: \"kubernetes.io/projected/97e394d1-03c3-467a-84b3-daf51739d393-kube-api-access-2jbt4\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.139009 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.139090 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97e394d1-03c3-467a-84b3-daf51739d393-logs\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.139219 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.139434 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-config-data\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.139455 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.139910 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.142070 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97e394d1-03c3-467a-84b3-daf51739d393-logs\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.146775 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.158442 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-config-data\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.163889 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.204930 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jbt4\" (UniqueName: \"kubernetes.io/projected/97e394d1-03c3-467a-84b3-daf51739d393-kube-api-access-2jbt4\") pod \"nova-metadata-0\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.243706 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.243993 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.244025 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.244128 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.244194 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-config\") pod 
\"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.244248 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.244293 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xrsm\" (UniqueName: \"kubernetes.io/projected/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-kube-api-access-2xrsm\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.244370 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.244426 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tthxv\" (UniqueName: \"kubernetes.io/projected/2784a16c-11ca-4472-bd50-f33dfb6f1086-kube-api-access-tthxv\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.253854 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.254651 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.278417 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tthxv\" (UniqueName: \"kubernetes.io/projected/2784a16c-11ca-4472-bd50-f33dfb6f1086-kube-api-access-tthxv\") pod \"nova-cell1-novncproxy-0\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.346774 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.346829 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " 
pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.346854 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.346939 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.346984 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-config\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.347038 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xrsm\" (UniqueName: \"kubernetes.io/projected/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-kube-api-access-2xrsm\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.348921 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.349158 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-config\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.349180 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.349191 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.350011 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.363599 4854 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.381979 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xrsm\" (UniqueName: \"kubernetes.io/projected/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-kube-api-access-2xrsm\") pod \"dnsmasq-dns-568d7fd7cf-77kl2\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.460109 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.469104 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-wg4tc"] Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.472226 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.813977 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-cbwwc"] Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.815782 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.823816 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.824534 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.824958 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.827641 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-jzshp" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.849714 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-cbwwc"] Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.880781 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.882936 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-combined-ca-bundle\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.883041 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-config-data\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.883086 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-scripts\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.883258 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh8ql\" (UniqueName: \"kubernetes.io/projected/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-kube-api-access-kh8ql\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.993576 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh8ql\" (UniqueName: \"kubernetes.io/projected/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-kube-api-access-kh8ql\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.995148 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-combined-ca-bundle\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.996135 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-config-data\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:40 crc kubenswrapper[4854]: I1125 10:00:40.996224 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-scripts\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.002808 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-config-data\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.003500 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-scripts\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.006307 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-combined-ca-bundle\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.012166 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh8ql\" (UniqueName: \"kubernetes.io/projected/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-kube-api-access-kh8ql\") pod \"aodh-db-sync-cbwwc\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.117363 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:00:41 crc kubenswrapper[4854]: W1125 10:00:41.147275 4854 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2784a16c_11ca_4472_bd50_f33dfb6f1086.slice/crio-9f7246ec141b8d1bc4971385d65f625a4aed358e2c6c9ebf25406c9e83bf6a29 WatchSource:0}: Error finding container 9f7246ec141b8d1bc4971385d65f625a4aed358e2c6c9ebf25406c9e83bf6a29: Status 404 returned error can't find the container with id 9f7246ec141b8d1bc4971385d65f625a4aed358e2c6c9ebf25406c9e83bf6a29 Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.147654 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.228893 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.408488 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-77kl2"] Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.410152 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"812423ef-93ee-4ed5-80d9-6429fb098b06","Type":"ContainerStarted","Data":"866d1d36d52c4588b10ee5003a4f9297494a8e4f5e7db0a504578ff6b3eb6617"} Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.417470 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-wg4tc" event={"ID":"b3722d7b-30f5-414d-b947-9be5a8494449","Type":"ContainerStarted","Data":"b0d047cca42fd9d3599491104df3f727dc0566c60422fcde475d13bdfb653ff4"} Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.417517 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-wg4tc" event={"ID":"b3722d7b-30f5-414d-b947-9be5a8494449","Type":"ContainerStarted","Data":"be39e538154279035aa3bf7e4c83c2e3be8f4ef7723fa2f888648bc4f3f6a7f1"} Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.434503 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2784a16c-11ca-4472-bd50-f33dfb6f1086","Type":"ContainerStarted","Data":"9f7246ec141b8d1bc4971385d65f625a4aed358e2c6c9ebf25406c9e83bf6a29"} Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.436537 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d2b5b62d-c86d-4e14-a9ff-b41300b16a72","Type":"ContainerStarted","Data":"ddff51015d6b95d7adcb487153674189a4a636a3007b9499f6e17505b3c06422"} Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.453693 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-wg4tc" podStartSLOduration=2.453661431 podStartE2EDuration="2.453661431s" podCreationTimestamp="2025-11-25 10:00:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:41.443586382 +0000 UTC m=+1447.296579758" watchObservedRunningTime="2025-11-25 10:00:41.453661431 +0000 UTC m=+1447.306654817" Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.679393 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:00:41 crc kubenswrapper[4854]: I1125 10:00:41.848057 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-cbwwc"] Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.003755 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6k8ck"] Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 
10:00:42.005476 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.008526 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.009741 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.056533 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-scripts\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.056605 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-config-data\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.056660 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.056799 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvm58\" (UniqueName: \"kubernetes.io/projected/5b769df1-897a-4560-a18d-cbb642930a72-kube-api-access-xvm58\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.066209 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6k8ck"] Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.161617 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-scripts\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.162015 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-config-data\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.162088 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc 
kubenswrapper[4854]: I1125 10:00:42.162182 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvm58\" (UniqueName: \"kubernetes.io/projected/5b769df1-897a-4560-a18d-cbb642930a72-kube-api-access-xvm58\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.174091 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-scripts\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.174127 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.174892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-config-data\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.187749 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvm58\" (UniqueName: \"kubernetes.io/projected/5b769df1-897a-4560-a18d-cbb642930a72-kube-api-access-xvm58\") pod \"nova-cell1-conductor-db-sync-6k8ck\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.360096 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.460764 4854 generic.go:334] "Generic (PLEG): container finished" podID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerID="920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886" exitCode=0 Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.460823 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" event={"ID":"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e","Type":"ContainerDied","Data":"920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886"} Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.460991 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" event={"ID":"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e","Type":"ContainerStarted","Data":"ec9456cb159934f6b3334ba6dd272ae3357035210d34cb8bec08f20a9a30bf5c"} Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.478690 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cbwwc" event={"ID":"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31","Type":"ContainerStarted","Data":"37fa4fe5cadb21b7f5f9ab38ba7e2222b6bc3328efd5e12cc2c20d1310c59cf9"} Nov 25 10:00:42 crc kubenswrapper[4854]: I1125 10:00:42.485891 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97e394d1-03c3-467a-84b3-daf51739d393","Type":"ContainerStarted","Data":"b086e17b9aaa0b5ce9639643f8c059034f1167635aca0c18d2d2b7740dbae72a"} Nov 25 10:00:43 crc kubenswrapper[4854]: I1125 10:00:43.069662 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6k8ck"] Nov 25 10:00:43 crc kubenswrapper[4854]: I1125 10:00:43.501330 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" event={"ID":"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e","Type":"ContainerStarted","Data":"3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06"} Nov 25 10:00:43 crc kubenswrapper[4854]: I1125 10:00:43.501694 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:43 crc kubenswrapper[4854]: I1125 10:00:43.508906 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" event={"ID":"5b769df1-897a-4560-a18d-cbb642930a72","Type":"ContainerStarted","Data":"79539487146f02b729722c061fa178dc6c60b315cb58c41f36da6e45b8c1130a"} Nov 25 10:00:43 crc kubenswrapper[4854]: I1125 10:00:43.527626 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" podStartSLOduration=4.527608093 podStartE2EDuration="4.527608093s" podCreationTimestamp="2025-11-25 10:00:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:43.526535304 +0000 UTC m=+1449.379528800" watchObservedRunningTime="2025-11-25 10:00:43.527608093 +0000 UTC m=+1449.380601469" Nov 25 10:00:44 crc kubenswrapper[4854]: I1125 10:00:44.172596 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:00:44 crc kubenswrapper[4854]: I1125 10:00:44.260949 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:00:44 crc kubenswrapper[4854]: I1125 10:00:44.574968 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-conductor-db-sync-6k8ck" event={"ID":"5b769df1-897a-4560-a18d-cbb642930a72","Type":"ContainerStarted","Data":"3c11e766e45c2c78b8029ebcb599b81e34371c452f7c6fba4eab4be4b6855590"} Nov 25 10:00:44 crc kubenswrapper[4854]: I1125 10:00:44.615504 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" podStartSLOduration=3.615484833 podStartE2EDuration="3.615484833s" podCreationTimestamp="2025-11-25 10:00:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:44.607956544 +0000 UTC m=+1450.460949930" watchObservedRunningTime="2025-11-25 10:00:44.615484833 +0000 UTC m=+1450.468478209" Nov 25 10:00:45 crc kubenswrapper[4854]: E1125 10:00:45.967492 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:00:46 crc kubenswrapper[4854]: E1125 10:00:46.160951 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:00:48 crc kubenswrapper[4854]: E1125 10:00:48.105277 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache]" Nov 25 10:00:48 crc kubenswrapper[4854]: E1125 10:00:48.106719 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:00:50 crc kubenswrapper[4854]: I1125 10:00:50.462903 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:00:50 crc kubenswrapper[4854]: I1125 10:00:50.535583 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-k6p96"] Nov 25 10:00:50 crc kubenswrapper[4854]: I1125 10:00:50.536165 4854 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerName="dnsmasq-dns" containerID="cri-o://4c86179bd6bab2075dddfa8a94732a16d2f9183fbcbd2217d914610f2ffa5a4d" gracePeriod=10 Nov 25 10:00:51 crc kubenswrapper[4854]: I1125 10:00:51.658136 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.214:5353: connect: connection refused" Nov 25 10:00:51 crc kubenswrapper[4854]: I1125 10:00:51.664342 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97e394d1-03c3-467a-84b3-daf51739d393","Type":"ContainerStarted","Data":"1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a"} Nov 25 10:00:51 crc kubenswrapper[4854]: I1125 10:00:51.666921 4854 generic.go:334] "Generic (PLEG): container finished" podID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerID="4c86179bd6bab2075dddfa8a94732a16d2f9183fbcbd2217d914610f2ffa5a4d" exitCode=0 Nov 25 10:00:51 crc kubenswrapper[4854]: I1125 10:00:51.666991 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" event={"ID":"9ff2c539-c2ac-4420-b11e-ba3c88af56be","Type":"ContainerDied","Data":"4c86179bd6bab2075dddfa8a94732a16d2f9183fbcbd2217d914610f2ffa5a4d"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.352818 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.451261 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-nb\") pod \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.451404 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc7dz\" (UniqueName: \"kubernetes.io/projected/9ff2c539-c2ac-4420-b11e-ba3c88af56be-kube-api-access-bc7dz\") pod \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.451471 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-config\") pod \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.451494 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-sb\") pod \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.451520 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-swift-storage-0\") pod \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.451701 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-svc\") pod \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\" (UID: \"9ff2c539-c2ac-4420-b11e-ba3c88af56be\") " Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.479502 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ff2c539-c2ac-4420-b11e-ba3c88af56be-kube-api-access-bc7dz" (OuterVolumeSpecName: "kube-api-access-bc7dz") pod "9ff2c539-c2ac-4420-b11e-ba3c88af56be" (UID: "9ff2c539-c2ac-4420-b11e-ba3c88af56be"). InnerVolumeSpecName "kube-api-access-bc7dz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.569770 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc7dz\" (UniqueName: \"kubernetes.io/projected/9ff2c539-c2ac-4420-b11e-ba3c88af56be-kube-api-access-bc7dz\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.639425 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9ff2c539-c2ac-4420-b11e-ba3c88af56be" (UID: "9ff2c539-c2ac-4420-b11e-ba3c88af56be"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.645124 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9ff2c539-c2ac-4420-b11e-ba3c88af56be" (UID: "9ff2c539-c2ac-4420-b11e-ba3c88af56be"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.663542 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-config" (OuterVolumeSpecName: "config") pod "9ff2c539-c2ac-4420-b11e-ba3c88af56be" (UID: "9ff2c539-c2ac-4420-b11e-ba3c88af56be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.665353 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9ff2c539-c2ac-4420-b11e-ba3c88af56be" (UID: "9ff2c539-c2ac-4420-b11e-ba3c88af56be"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.672032 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9ff2c539-c2ac-4420-b11e-ba3c88af56be" (UID: "9ff2c539-c2ac-4420-b11e-ba3c88af56be"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.672977 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.673013 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.673025 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.673037 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.673049 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ff2c539-c2ac-4420-b11e-ba3c88af56be-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.694502 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2784a16c-11ca-4472-bd50-f33dfb6f1086","Type":"ContainerStarted","Data":"7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.694639 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="2784a16c-11ca-4472-bd50-f33dfb6f1086" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795" gracePeriod=30 Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.698340 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d2b5b62d-c86d-4e14-a9ff-b41300b16a72","Type":"ContainerStarted","Data":"bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.706089 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cbwwc" event={"ID":"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31","Type":"ContainerStarted","Data":"52d00e92708fd941e776114792bd3513c0b495c32e260958d01d56151406181d"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.715134 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=4.421108447 podStartE2EDuration="13.715114653s" podCreationTimestamp="2025-11-25 10:00:39 +0000 UTC" firstStartedPulling="2025-11-25 10:00:41.152953621 +0000 UTC m=+1447.005946997" lastFinishedPulling="2025-11-25 10:00:50.446959827 +0000 UTC m=+1456.299953203" observedRunningTime="2025-11-25 10:00:52.712633534 +0000 UTC m=+1458.565626920" watchObservedRunningTime="2025-11-25 10:00:52.715114653 +0000 UTC m=+1458.568108029" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.715558 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"812423ef-93ee-4ed5-80d9-6429fb098b06","Type":"ContainerStarted","Data":"df9ca6fa57f4847e5ff1adb1b48549a8fca910546b72c771701ef7614bbf38bd"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.715605 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"812423ef-93ee-4ed5-80d9-6429fb098b06","Type":"ContainerStarted","Data":"4ab6d9c8e9e1214e98c36ad721d6af9e8cd259c2bf54fc158160a8d2aab3758f"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.718984 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97e394d1-03c3-467a-84b3-daf51739d393","Type":"ContainerStarted","Data":"7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.719161 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-log" containerID="cri-o://1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a" gracePeriod=30 Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.719426 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-metadata" containerID="cri-o://7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a" gracePeriod=30 Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.728491 4854 generic.go:334] "Generic (PLEG): container finished" podID="b3722d7b-30f5-414d-b947-9be5a8494449" containerID="b0d047cca42fd9d3599491104df3f727dc0566c60422fcde475d13bdfb653ff4" exitCode=0 Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.728562 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-wg4tc" event={"ID":"b3722d7b-30f5-414d-b947-9be5a8494449","Type":"ContainerDied","Data":"b0d047cca42fd9d3599491104df3f727dc0566c60422fcde475d13bdfb653ff4"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.743768 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-cbwwc" podStartSLOduration=2.56935922 podStartE2EDuration="12.743744795s" podCreationTimestamp="2025-11-25 10:00:40 +0000 UTC" firstStartedPulling="2025-11-25 10:00:41.871266845 +0000 UTC m=+1447.724260221" lastFinishedPulling="2025-11-25 10:00:52.04565242 +0000 UTC m=+1457.898645796" observedRunningTime="2025-11-25 10:00:52.736383391 +0000 UTC m=+1458.589376767" watchObservedRunningTime="2025-11-25 10:00:52.743744795 +0000 UTC m=+1458.596738181" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.744390 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" event={"ID":"9ff2c539-c2ac-4420-b11e-ba3c88af56be","Type":"ContainerDied","Data":"fa010a79ae8e0c040d0e0ef4fe454a2096e707dfb137f8fb08b6de756aab50da"} Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.744438 4854 scope.go:117] "RemoveContainer" containerID="4c86179bd6bab2075dddfa8a94732a16d2f9183fbcbd2217d914610f2ffa5a4d" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.744572 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-k6p96" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.773423 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=4.330098408 podStartE2EDuration="13.773406826s" podCreationTimestamp="2025-11-25 10:00:39 +0000 UTC" firstStartedPulling="2025-11-25 10:00:41.11965544 +0000 UTC m=+1446.972648816" lastFinishedPulling="2025-11-25 10:00:50.562963858 +0000 UTC m=+1456.415957234" observedRunningTime="2025-11-25 10:00:52.750524853 +0000 UTC m=+1458.603518229" watchObservedRunningTime="2025-11-25 10:00:52.773406826 +0000 UTC m=+1458.626400202" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.794219 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.190612048 podStartE2EDuration="13.794200131s" podCreationTimestamp="2025-11-25 10:00:39 +0000 UTC" firstStartedPulling="2025-11-25 10:00:40.816273835 +0000 UTC m=+1446.669267211" lastFinishedPulling="2025-11-25 10:00:50.419861878 +0000 UTC m=+1456.272855294" observedRunningTime="2025-11-25 10:00:52.772237473 +0000 UTC m=+1458.625230869" watchObservedRunningTime="2025-11-25 10:00:52.794200131 +0000 UTC m=+1458.647193507" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.797460 4854 scope.go:117] "RemoveContainer" containerID="ad3ce8348ec444b7bf52af5f8a59b448785767344363526226c975826aba8488" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.815965 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=5.093605603 podStartE2EDuration="13.815949223s" podCreationTimestamp="2025-11-25 10:00:39 +0000 UTC" firstStartedPulling="2025-11-25 10:00:41.725484232 +0000 UTC m=+1447.578477608" lastFinishedPulling="2025-11-25 10:00:50.447827852 +0000 UTC m=+1456.300821228" observedRunningTime="2025-11-25 10:00:52.805610996 +0000 UTC m=+1458.658604372" watchObservedRunningTime="2025-11-25 10:00:52.815949223 +0000 UTC m=+1458.668942599" Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.941305 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-k6p96"] Nov 25 10:00:52 crc kubenswrapper[4854]: I1125 10:00:52.958611 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-k6p96"] Nov 25 10:00:53 crc kubenswrapper[4854]: I1125 10:00:53.026131 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" path="/var/lib/kubelet/pods/9ff2c539-c2ac-4420-b11e-ba3c88af56be/volumes" Nov 25 10:00:53 crc kubenswrapper[4854]: I1125 10:00:53.755048 4854 generic.go:334] "Generic (PLEG): container finished" podID="97e394d1-03c3-467a-84b3-daf51739d393" containerID="1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a" exitCode=143 Nov 25 10:00:53 crc kubenswrapper[4854]: I1125 10:00:53.755122 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97e394d1-03c3-467a-84b3-daf51739d393","Type":"ContainerDied","Data":"1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a"} Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.213338 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.324828 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-scripts\") pod \"b3722d7b-30f5-414d-b947-9be5a8494449\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.324992 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-combined-ca-bundle\") pod \"b3722d7b-30f5-414d-b947-9be5a8494449\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.325110 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-config-data\") pod \"b3722d7b-30f5-414d-b947-9be5a8494449\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.325309 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4rpb\" (UniqueName: \"kubernetes.io/projected/b3722d7b-30f5-414d-b947-9be5a8494449-kube-api-access-z4rpb\") pod \"b3722d7b-30f5-414d-b947-9be5a8494449\" (UID: \"b3722d7b-30f5-414d-b947-9be5a8494449\") " Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.354822 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-scripts" (OuterVolumeSpecName: "scripts") pod "b3722d7b-30f5-414d-b947-9be5a8494449" (UID: "b3722d7b-30f5-414d-b947-9be5a8494449"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.356348 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3722d7b-30f5-414d-b947-9be5a8494449-kube-api-access-z4rpb" (OuterVolumeSpecName: "kube-api-access-z4rpb") pod "b3722d7b-30f5-414d-b947-9be5a8494449" (UID: "b3722d7b-30f5-414d-b947-9be5a8494449"). InnerVolumeSpecName "kube-api-access-z4rpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.387379 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3722d7b-30f5-414d-b947-9be5a8494449" (UID: "b3722d7b-30f5-414d-b947-9be5a8494449"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.431469 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.431503 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.431515 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4rpb\" (UniqueName: \"kubernetes.io/projected/b3722d7b-30f5-414d-b947-9be5a8494449-kube-api-access-z4rpb\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.439788 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-config-data" (OuterVolumeSpecName: "config-data") pod "b3722d7b-30f5-414d-b947-9be5a8494449" (UID: "b3722d7b-30f5-414d-b947-9be5a8494449"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.533780 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3722d7b-30f5-414d-b947-9be5a8494449-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.769841 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-wg4tc" event={"ID":"b3722d7b-30f5-414d-b947-9be5a8494449","Type":"ContainerDied","Data":"be39e538154279035aa3bf7e4c83c2e3be8f4ef7723fa2f888648bc4f3f6a7f1"} Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.769903 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-wg4tc" Nov 25 10:00:54 crc kubenswrapper[4854]: I1125 10:00:54.769912 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be39e538154279035aa3bf7e4c83c2e3be8f4ef7723fa2f888648bc4f3f6a7f1" Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.031689 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.032291 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d2b5b62d-c86d-4e14-a9ff-b41300b16a72" containerName="nova-scheduler-scheduler" containerID="cri-o://bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189" gracePeriod=30 Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.052229 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.052494 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerName="nova-api-log" containerID="cri-o://4ab6d9c8e9e1214e98c36ad721d6af9e8cd259c2bf54fc158160a8d2aab3758f" gracePeriod=30 Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.052554 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerName="nova-api-api" containerID="cri-o://df9ca6fa57f4847e5ff1adb1b48549a8fca910546b72c771701ef7614bbf38bd" gracePeriod=30 Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.166423 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.364441 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.472980 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.473031 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.781932 4854 generic.go:334] "Generic (PLEG): container finished" podID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerID="df9ca6fa57f4847e5ff1adb1b48549a8fca910546b72c771701ef7614bbf38bd" exitCode=0 Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.781964 4854 generic.go:334] "Generic (PLEG): container finished" podID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerID="4ab6d9c8e9e1214e98c36ad721d6af9e8cd259c2bf54fc158160a8d2aab3758f" exitCode=143 Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.781984 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"812423ef-93ee-4ed5-80d9-6429fb098b06","Type":"ContainerDied","Data":"df9ca6fa57f4847e5ff1adb1b48549a8fca910546b72c771701ef7614bbf38bd"} Nov 25 10:00:55 crc kubenswrapper[4854]: I1125 10:00:55.782011 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"812423ef-93ee-4ed5-80d9-6429fb098b06","Type":"ContainerDied","Data":"4ab6d9c8e9e1214e98c36ad721d6af9e8cd259c2bf54fc158160a8d2aab3758f"} Nov 25 10:00:56 crc kubenswrapper[4854]: E1125 10:00:56.364551 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" 
err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache]" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.476128 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.582040 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9lxr\" (UniqueName: \"kubernetes.io/projected/812423ef-93ee-4ed5-80d9-6429fb098b06-kube-api-access-z9lxr\") pod \"812423ef-93ee-4ed5-80d9-6429fb098b06\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.582525 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812423ef-93ee-4ed5-80d9-6429fb098b06-logs\") pod \"812423ef-93ee-4ed5-80d9-6429fb098b06\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.582629 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-combined-ca-bundle\") pod \"812423ef-93ee-4ed5-80d9-6429fb098b06\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.582736 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-config-data\") pod \"812423ef-93ee-4ed5-80d9-6429fb098b06\" (UID: \"812423ef-93ee-4ed5-80d9-6429fb098b06\") " Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.582912 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/812423ef-93ee-4ed5-80d9-6429fb098b06-logs" (OuterVolumeSpecName: "logs") pod "812423ef-93ee-4ed5-80d9-6429fb098b06" (UID: "812423ef-93ee-4ed5-80d9-6429fb098b06"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.583498 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812423ef-93ee-4ed5-80d9-6429fb098b06-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.601527 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/812423ef-93ee-4ed5-80d9-6429fb098b06-kube-api-access-z9lxr" (OuterVolumeSpecName: "kube-api-access-z9lxr") pod "812423ef-93ee-4ed5-80d9-6429fb098b06" (UID: "812423ef-93ee-4ed5-80d9-6429fb098b06"). InnerVolumeSpecName "kube-api-access-z9lxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.620344 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-config-data" (OuterVolumeSpecName: "config-data") pod "812423ef-93ee-4ed5-80d9-6429fb098b06" (UID: "812423ef-93ee-4ed5-80d9-6429fb098b06"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.634857 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "812423ef-93ee-4ed5-80d9-6429fb098b06" (UID: "812423ef-93ee-4ed5-80d9-6429fb098b06"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.686231 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.686277 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/812423ef-93ee-4ed5-80d9-6429fb098b06-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.686290 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9lxr\" (UniqueName: \"kubernetes.io/projected/812423ef-93ee-4ed5-80d9-6429fb098b06-kube-api-access-z9lxr\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.796089 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"812423ef-93ee-4ed5-80d9-6429fb098b06","Type":"ContainerDied","Data":"866d1d36d52c4588b10ee5003a4f9297494a8e4f5e7db0a504578ff6b3eb6617"} Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.796166 4854 scope.go:117] "RemoveContainer" containerID="df9ca6fa57f4847e5ff1adb1b48549a8fca910546b72c771701ef7614bbf38bd" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.796441 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.813719 4854 generic.go:334] "Generic (PLEG): container finished" podID="5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" containerID="52d00e92708fd941e776114792bd3513c0b495c32e260958d01d56151406181d" exitCode=0 Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.813766 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cbwwc" event={"ID":"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31","Type":"ContainerDied","Data":"52d00e92708fd941e776114792bd3513c0b495c32e260958d01d56151406181d"} Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.843183 4854 scope.go:117] "RemoveContainer" containerID="4ab6d9c8e9e1214e98c36ad721d6af9e8cd259c2bf54fc158160a8d2aab3758f" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.867838 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.897647 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.923474 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:56 crc kubenswrapper[4854]: E1125 10:00:56.924190 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerName="nova-api-log" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924214 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerName="nova-api-log" Nov 25 10:00:56 crc kubenswrapper[4854]: E1125 10:00:56.924224 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerName="nova-api-api" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924230 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerName="nova-api-api" Nov 25 10:00:56 crc kubenswrapper[4854]: E1125 10:00:56.924260 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerName="dnsmasq-dns" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924268 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerName="dnsmasq-dns" Nov 25 10:00:56 crc kubenswrapper[4854]: E1125 10:00:56.924279 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerName="init" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924285 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerName="init" Nov 25 10:00:56 crc kubenswrapper[4854]: E1125 10:00:56.924298 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3722d7b-30f5-414d-b947-9be5a8494449" containerName="nova-manage" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924306 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3722d7b-30f5-414d-b947-9be5a8494449" containerName="nova-manage" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924525 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" containerName="nova-api-api" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924539 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" 
containerName="nova-api-log" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924553 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3722d7b-30f5-414d-b947-9be5a8494449" containerName="nova-manage" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.924572 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ff2c539-c2ac-4420-b11e-ba3c88af56be" containerName="dnsmasq-dns" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.926519 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.928395 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.935910 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.991972 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb9dw\" (UniqueName: \"kubernetes.io/projected/9064c524-7e0e-4327-ae10-b531d78c450b-kube-api-access-zb9dw\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.992046 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-config-data\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.992190 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9064c524-7e0e-4327-ae10-b531d78c450b-logs\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:56 crc kubenswrapper[4854]: I1125 10:00:56.992220 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.029744 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="812423ef-93ee-4ed5-80d9-6429fb098b06" path="/var/lib/kubelet/pods/812423ef-93ee-4ed5-80d9-6429fb098b06/volumes" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.094856 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9064c524-7e0e-4327-ae10-b531d78c450b-logs\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.094907 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.095145 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb9dw\" (UniqueName: 
\"kubernetes.io/projected/9064c524-7e0e-4327-ae10-b531d78c450b-kube-api-access-zb9dw\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.095626 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-config-data\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.096059 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9064c524-7e0e-4327-ae10-b531d78c450b-logs\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.100428 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-config-data\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.101503 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.114175 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb9dw\" (UniqueName: \"kubernetes.io/projected/9064c524-7e0e-4327-ae10-b531d78c450b-kube-api-access-zb9dw\") pod \"nova-api-0\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.257632 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:00:57 crc kubenswrapper[4854]: W1125 10:00:57.790886 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9064c524_7e0e_4327_ae10_b531d78c450b.slice/crio-f81c4eebc8612d0ad2cf5522672c05fda35477895f396cac8bfab7b11496cab4 WatchSource:0}: Error finding container f81c4eebc8612d0ad2cf5522672c05fda35477895f396cac8bfab7b11496cab4: Status 404 returned error can't find the container with id f81c4eebc8612d0ad2cf5522672c05fda35477895f396cac8bfab7b11496cab4 Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.793095 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:00:57 crc kubenswrapper[4854]: I1125 10:00:57.825891 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9064c524-7e0e-4327-ae10-b531d78c450b","Type":"ContainerStarted","Data":"f81c4eebc8612d0ad2cf5522672c05fda35477895f396cac8bfab7b11496cab4"} Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.263007 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.328280 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-combined-ca-bundle\") pod \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.329349 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kh8ql\" (UniqueName: \"kubernetes.io/projected/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-kube-api-access-kh8ql\") pod \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.329406 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-scripts\") pod \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.329475 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-config-data\") pod \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\" (UID: \"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31\") " Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.333032 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-scripts" (OuterVolumeSpecName: "scripts") pod "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" (UID: "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.341060 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-kube-api-access-kh8ql" (OuterVolumeSpecName: "kube-api-access-kh8ql") pod "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" (UID: "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31"). InnerVolumeSpecName "kube-api-access-kh8ql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.431218 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-config-data" (OuterVolumeSpecName: "config-data") pod "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" (UID: "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.432708 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kh8ql\" (UniqueName: \"kubernetes.io/projected/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-kube-api-access-kh8ql\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.432740 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.432754 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.439878 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" (UID: "5e005c0f-47c9-4e9a-a788-6a9ec4c8db31"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.535809 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.839543 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cbwwc" event={"ID":"5e005c0f-47c9-4e9a-a788-6a9ec4c8db31","Type":"ContainerDied","Data":"37fa4fe5cadb21b7f5f9ab38ba7e2222b6bc3328efd5e12cc2c20d1310c59cf9"} Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.839566 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-cbwwc" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.839588 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37fa4fe5cadb21b7f5f9ab38ba7e2222b6bc3328efd5e12cc2c20d1310c59cf9" Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.842935 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9064c524-7e0e-4327-ae10-b531d78c450b","Type":"ContainerStarted","Data":"e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a"} Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.843037 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9064c524-7e0e-4327-ae10-b531d78c450b","Type":"ContainerStarted","Data":"e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a"} Nov 25 10:00:58 crc kubenswrapper[4854]: I1125 10:00:58.906969 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.906933028 podStartE2EDuration="2.906933028s" podCreationTimestamp="2025-11-25 10:00:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:00:58.86723543 +0000 UTC m=+1464.720228806" watchObservedRunningTime="2025-11-25 10:00:58.906933028 +0000 UTC m=+1464.759926434" Nov 25 10:00:59 crc kubenswrapper[4854]: I1125 10:00:59.860201 4854 generic.go:334] "Generic (PLEG): container finished" podID="5b769df1-897a-4560-a18d-cbb642930a72" containerID="3c11e766e45c2c78b8029ebcb599b81e34371c452f7c6fba4eab4be4b6855590" exitCode=0 Nov 25 10:00:59 crc kubenswrapper[4854]: I1125 10:00:59.860288 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" event={"ID":"5b769df1-897a-4560-a18d-cbb642930a72","Type":"ContainerDied","Data":"3c11e766e45c2c78b8029ebcb599b81e34371c452f7c6fba4eab4be4b6855590"} Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.144065 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401081-jn8kd"] Nov 25 10:01:00 crc kubenswrapper[4854]: E1125 10:01:00.145035 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" containerName="aodh-db-sync" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.145158 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" containerName="aodh-db-sync" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.145507 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" containerName="aodh-db-sync" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.146524 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.156622 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401081-jn8kd"] Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.278117 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-config-data\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.278225 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-fernet-keys\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.278344 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-combined-ca-bundle\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.278411 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtv97\" (UniqueName: \"kubernetes.io/projected/ee7b8559-e810-42ef-99b7-e206a817fd29-kube-api-access-wtv97\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.352306 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.357239 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.361179 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.361424 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-jzshp" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.361538 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.372638 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.383780 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-fernet-keys\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.383994 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-combined-ca-bundle\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.384088 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtv97\" (UniqueName: \"kubernetes.io/projected/ee7b8559-e810-42ef-99b7-e206a817fd29-kube-api-access-wtv97\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.384153 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-config-data\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.396737 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-combined-ca-bundle\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.397510 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-fernet-keys\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.400260 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-config-data\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.420896 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtv97\" (UniqueName: 
\"kubernetes.io/projected/ee7b8559-e810-42ef-99b7-e206a817fd29-kube-api-access-wtv97\") pod \"keystone-cron-29401081-jn8kd\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.482218 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.486692 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-config-data\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.486760 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.486801 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-scripts\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.486996 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfnmg\" (UniqueName: \"kubernetes.io/projected/e9b28076-56b0-47d7-a0b5-f82956ea494a-kube-api-access-qfnmg\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.589304 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-config-data\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.589361 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.589398 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-scripts\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.589536 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfnmg\" (UniqueName: \"kubernetes.io/projected/e9b28076-56b0-47d7-a0b5-f82956ea494a-kube-api-access-qfnmg\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.593807 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-scripts\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 
10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.594494 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-config-data\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.595448 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.609907 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfnmg\" (UniqueName: \"kubernetes.io/projected/e9b28076-56b0-47d7-a0b5-f82956ea494a-kube-api-access-qfnmg\") pod \"aodh-0\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " pod="openstack/aodh-0" Nov 25 10:01:00 crc kubenswrapper[4854]: I1125 10:01:00.674262 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:01:01 crc kubenswrapper[4854]: W1125 10:01:01.076296 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee7b8559_e810_42ef_99b7_e206a817fd29.slice/crio-3e321a93e08fdcf0fb0c6fa47851b927831fa4e9527fddf2bbc3ab0eae95a9f2 WatchSource:0}: Error finding container 3e321a93e08fdcf0fb0c6fa47851b927831fa4e9527fddf2bbc3ab0eae95a9f2: Status 404 returned error can't find the container with id 3e321a93e08fdcf0fb0c6fa47851b927831fa4e9527fddf2bbc3ab0eae95a9f2 Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.085299 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401081-jn8kd"] Nov 25 10:01:01 crc kubenswrapper[4854]: W1125 10:01:01.345825 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9b28076_56b0_47d7_a0b5_f82956ea494a.slice/crio-1ab9cadd9c09a3a6ec7138425aa7ed22e7bb15d7aa6ca60e9457c2b811ebc998 WatchSource:0}: Error finding container 1ab9cadd9c09a3a6ec7138425aa7ed22e7bb15d7aa6ca60e9457c2b811ebc998: Status 404 returned error can't find the container with id 1ab9cadd9c09a3a6ec7138425aa7ed22e7bb15d7aa6ca60e9457c2b811ebc998 Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.364285 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.510986 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:01:01 crc kubenswrapper[4854]: E1125 10:01:01.555749 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.623431 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-config-data\") pod \"5b769df1-897a-4560-a18d-cbb642930a72\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.623620 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-scripts\") pod \"5b769df1-897a-4560-a18d-cbb642930a72\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.623662 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-combined-ca-bundle\") pod \"5b769df1-897a-4560-a18d-cbb642930a72\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.623738 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvm58\" (UniqueName: \"kubernetes.io/projected/5b769df1-897a-4560-a18d-cbb642930a72-kube-api-access-xvm58\") pod \"5b769df1-897a-4560-a18d-cbb642930a72\" (UID: \"5b769df1-897a-4560-a18d-cbb642930a72\") " Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.635697 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b769df1-897a-4560-a18d-cbb642930a72-kube-api-access-xvm58" (OuterVolumeSpecName: "kube-api-access-xvm58") pod "5b769df1-897a-4560-a18d-cbb642930a72" (UID: "5b769df1-897a-4560-a18d-cbb642930a72"). InnerVolumeSpecName "kube-api-access-xvm58". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.649849 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-scripts" (OuterVolumeSpecName: "scripts") pod "5b769df1-897a-4560-a18d-cbb642930a72" (UID: "5b769df1-897a-4560-a18d-cbb642930a72"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.696899 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.698493 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-config-data" (OuterVolumeSpecName: "config-data") pod "5b769df1-897a-4560-a18d-cbb642930a72" (UID: "5b769df1-897a-4560-a18d-cbb642930a72"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.711865 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b769df1-897a-4560-a18d-cbb642930a72" (UID: "5b769df1-897a-4560-a18d-cbb642930a72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.729167 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.729203 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.729214 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvm58\" (UniqueName: \"kubernetes.io/projected/5b769df1-897a-4560-a18d-cbb642930a72-kube-api-access-xvm58\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.729223 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b769df1-897a-4560-a18d-cbb642930a72-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.910340 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerStarted","Data":"1ab9cadd9c09a3a6ec7138425aa7ed22e7bb15d7aa6ca60e9457c2b811ebc998"} Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.939263 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" event={"ID":"5b769df1-897a-4560-a18d-cbb642930a72","Type":"ContainerDied","Data":"79539487146f02b729722c061fa178dc6c60b315cb58c41f36da6e45b8c1130a"} Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.939528 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79539487146f02b729722c061fa178dc6c60b315cb58c41f36da6e45b8c1130a" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.939280 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6k8ck" Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.959872 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-jn8kd" event={"ID":"ee7b8559-e810-42ef-99b7-e206a817fd29","Type":"ContainerStarted","Data":"86da15c099056e0bcea3a48ec9eb715c11c40817caa6f61f58fb8cfaeecb9232"} Nov 25 10:01:01 crc kubenswrapper[4854]: I1125 10:01:01.960037 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-jn8kd" event={"ID":"ee7b8559-e810-42ef-99b7-e206a817fd29","Type":"ContainerStarted","Data":"3e321a93e08fdcf0fb0c6fa47851b927831fa4e9527fddf2bbc3ab0eae95a9f2"} Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.007379 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401081-jn8kd" podStartSLOduration=2.007355051 podStartE2EDuration="2.007355051s" podCreationTimestamp="2025-11-25 10:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:01.98637819 +0000 UTC m=+1467.839371576" watchObservedRunningTime="2025-11-25 10:01:02.007355051 +0000 UTC m=+1467.860348427" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.077244 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:01:02 crc kubenswrapper[4854]: E1125 10:01:02.077820 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b769df1-897a-4560-a18d-cbb642930a72" containerName="nova-cell1-conductor-db-sync" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.077838 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b769df1-897a-4560-a18d-cbb642930a72" containerName="nova-cell1-conductor-db-sync" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.078128 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b769df1-897a-4560-a18d-cbb642930a72" containerName="nova-cell1-conductor-db-sync" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.079019 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.084059 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.093998 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.138570 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4367eb79-33da-49b0-8471-f07db0d493b4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.138614 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfn8r\" (UniqueName: \"kubernetes.io/projected/4367eb79-33da-49b0-8471-f07db0d493b4-kube-api-access-kfn8r\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.138635 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4367eb79-33da-49b0-8471-f07db0d493b4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.241124 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4367eb79-33da-49b0-8471-f07db0d493b4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.241485 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfn8r\" (UniqueName: \"kubernetes.io/projected/4367eb79-33da-49b0-8471-f07db0d493b4-kube-api-access-kfn8r\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.241602 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4367eb79-33da-49b0-8471-f07db0d493b4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.246504 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4367eb79-33da-49b0-8471-f07db0d493b4-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.248386 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4367eb79-33da-49b0-8471-f07db0d493b4-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.261768 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfn8r\" (UniqueName: \"kubernetes.io/projected/4367eb79-33da-49b0-8471-f07db0d493b4-kube-api-access-kfn8r\") pod \"nova-cell1-conductor-0\" (UID: \"4367eb79-33da-49b0-8471-f07db0d493b4\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: I1125 10:01:02.408457 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:02 crc kubenswrapper[4854]: W1125 10:01:02.984841 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4367eb79_33da_49b0_8471_f07db0d493b4.slice/crio-4715393a0bd667f2a9e36b54908b66f8c8053216cde88a09f04e4b529a887669 WatchSource:0}: Error finding container 4715393a0bd667f2a9e36b54908b66f8c8053216cde88a09f04e4b529a887669: Status 404 returned error can't find the container with id 4715393a0bd667f2a9e36b54908b66f8c8053216cde88a09f04e4b529a887669 Nov 25 10:01:03 crc kubenswrapper[4854]: I1125 10:01:02.998352 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerStarted","Data":"61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01"} Nov 25 10:01:03 crc kubenswrapper[4854]: I1125 10:01:03.001743 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:01:03 crc kubenswrapper[4854]: I1125 10:01:03.827185 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.032645 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4367eb79-33da-49b0-8471-f07db0d493b4","Type":"ContainerStarted","Data":"29190f7fd6d47383a4516b38165c87fe89e6584d80b9af83c3fd57f40446257a"} Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.035037 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.035464 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4367eb79-33da-49b0-8471-f07db0d493b4","Type":"ContainerStarted","Data":"4715393a0bd667f2a9e36b54908b66f8c8053216cde88a09f04e4b529a887669"} Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.035934 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.036316 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-central-agent" containerID="cri-o://e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254" gracePeriod=30 Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.036542 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="proxy-httpd" containerID="cri-o://17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed" gracePeriod=30 Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.036543 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="sg-core" 
containerID="cri-o://84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142" gracePeriod=30 Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.036594 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-notification-agent" containerID="cri-o://7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428" gracePeriod=30 Nov 25 10:01:04 crc kubenswrapper[4854]: I1125 10:01:04.069975 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.069953448 podStartE2EDuration="3.069953448s" podCreationTimestamp="2025-11-25 10:01:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:04.065179006 +0000 UTC m=+1469.918172382" watchObservedRunningTime="2025-11-25 10:01:04.069953448 +0000 UTC m=+1469.922946824" Nov 25 10:01:05 crc kubenswrapper[4854]: I1125 10:01:05.047232 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerStarted","Data":"faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a"} Nov 25 10:01:05 crc kubenswrapper[4854]: I1125 10:01:05.053339 4854 generic.go:334] "Generic (PLEG): container finished" podID="10883200-ec1b-4bca-835d-a30490040779" containerID="17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed" exitCode=0 Nov 25 10:01:05 crc kubenswrapper[4854]: I1125 10:01:05.053393 4854 generic.go:334] "Generic (PLEG): container finished" podID="10883200-ec1b-4bca-835d-a30490040779" containerID="84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142" exitCode=2 Nov 25 10:01:05 crc kubenswrapper[4854]: I1125 10:01:05.053403 4854 generic.go:334] "Generic (PLEG): container finished" podID="10883200-ec1b-4bca-835d-a30490040779" containerID="e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254" exitCode=0 Nov 25 10:01:05 crc kubenswrapper[4854]: I1125 10:01:05.053397 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerDied","Data":"17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed"} Nov 25 10:01:05 crc kubenswrapper[4854]: I1125 10:01:05.053468 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerDied","Data":"84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142"} Nov 25 10:01:05 crc kubenswrapper[4854]: I1125 10:01:05.053483 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerDied","Data":"e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254"} Nov 25 10:01:06 crc kubenswrapper[4854]: I1125 10:01:06.069434 4854 generic.go:334] "Generic (PLEG): container finished" podID="ee7b8559-e810-42ef-99b7-e206a817fd29" containerID="86da15c099056e0bcea3a48ec9eb715c11c40817caa6f61f58fb8cfaeecb9232" exitCode=0 Nov 25 10:01:06 crc kubenswrapper[4854]: I1125 10:01:06.069504 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-jn8kd" event={"ID":"ee7b8559-e810-42ef-99b7-e206a817fd29","Type":"ContainerDied","Data":"86da15c099056e0bcea3a48ec9eb715c11c40817caa6f61f58fb8cfaeecb9232"} Nov 25 
10:01:06 crc kubenswrapper[4854]: E1125 10:01:06.430706 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache]" Nov 25 10:01:06 crc kubenswrapper[4854]: I1125 10:01:06.936053 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.007341 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-sg-core-conf-yaml\") pod \"10883200-ec1b-4bca-835d-a30490040779\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.007518 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-scripts\") pod \"10883200-ec1b-4bca-835d-a30490040779\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.008234 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-config-data\") pod \"10883200-ec1b-4bca-835d-a30490040779\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.008340 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-log-httpd\") pod \"10883200-ec1b-4bca-835d-a30490040779\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.008481 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqcdf\" (UniqueName: \"kubernetes.io/projected/10883200-ec1b-4bca-835d-a30490040779-kube-api-access-tqcdf\") pod \"10883200-ec1b-4bca-835d-a30490040779\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.008526 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-run-httpd\") pod \"10883200-ec1b-4bca-835d-a30490040779\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.008557 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-combined-ca-bundle\") pod \"10883200-ec1b-4bca-835d-a30490040779\" (UID: \"10883200-ec1b-4bca-835d-a30490040779\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.010316 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "10883200-ec1b-4bca-835d-a30490040779" (UID: "10883200-ec1b-4bca-835d-a30490040779"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.010822 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "10883200-ec1b-4bca-835d-a30490040779" (UID: "10883200-ec1b-4bca-835d-a30490040779"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.014261 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10883200-ec1b-4bca-835d-a30490040779-kube-api-access-tqcdf" (OuterVolumeSpecName: "kube-api-access-tqcdf") pod "10883200-ec1b-4bca-835d-a30490040779" (UID: "10883200-ec1b-4bca-835d-a30490040779"). InnerVolumeSpecName "kube-api-access-tqcdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.019126 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-scripts" (OuterVolumeSpecName: "scripts") pod "10883200-ec1b-4bca-835d-a30490040779" (UID: "10883200-ec1b-4bca-835d-a30490040779"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.071168 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "10883200-ec1b-4bca-835d-a30490040779" (UID: "10883200-ec1b-4bca-835d-a30490040779"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.086464 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerStarted","Data":"7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6"} Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.108190 4854 generic.go:334] "Generic (PLEG): container finished" podID="10883200-ec1b-4bca-835d-a30490040779" containerID="7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428" exitCode=0 Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.108451 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.109340 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerDied","Data":"7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428"} Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.109379 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"10883200-ec1b-4bca-835d-a30490040779","Type":"ContainerDied","Data":"9dbecd9c9833fc94168a42d2fdd91f45743710c2d291ff023bc8b6fa1611d0d6"} Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.109399 4854 scope.go:117] "RemoveContainer" containerID="17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.114593 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.114622 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqcdf\" (UniqueName: \"kubernetes.io/projected/10883200-ec1b-4bca-835d-a30490040779-kube-api-access-tqcdf\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.114634 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/10883200-ec1b-4bca-835d-a30490040779-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.114644 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.114655 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.166764 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10883200-ec1b-4bca-835d-a30490040779" (UID: "10883200-ec1b-4bca-835d-a30490040779"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.169751 4854 scope.go:117] "RemoveContainer" containerID="84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.182814 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-config-data" (OuterVolumeSpecName: "config-data") pod "10883200-ec1b-4bca-835d-a30490040779" (UID: "10883200-ec1b-4bca-835d-a30490040779"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.216567 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.216627 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10883200-ec1b-4bca-835d-a30490040779-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.240028 4854 scope.go:117] "RemoveContainer" containerID="7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.258305 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.258346 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.368992 4854 scope.go:117] "RemoveContainer" containerID="e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.545265 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.548396 4854 scope.go:117] "RemoveContainer" containerID="17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed" Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.557826 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed\": container with ID starting with 17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed not found: ID does not exist" containerID="17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.557885 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed"} err="failed to get container status \"17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed\": rpc error: code = NotFound desc = could not find container \"17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed\": container with ID starting with 17f945bd7cd527c6afceecfcfb99fd6917f7636699a41400b91f3355e4b228ed not found: ID does not exist" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.557917 4854 scope.go:117] "RemoveContainer" containerID="84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142" Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.563103 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142\": container with ID starting with 84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142 not found: ID does not exist" containerID="84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.563321 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142"} err="failed to get 
container status \"84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142\": rpc error: code = NotFound desc = could not find container \"84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142\": container with ID starting with 84082d390b0f998eeb9a7f4ca7cdd42b8e0eecd1c8b9b90a98e9acc62bd56142 not found: ID does not exist" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.563411 4854 scope.go:117] "RemoveContainer" containerID="7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428" Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.564101 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428\": container with ID starting with 7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428 not found: ID does not exist" containerID="7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.564134 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428"} err="failed to get container status \"7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428\": rpc error: code = NotFound desc = could not find container \"7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428\": container with ID starting with 7f00644ded00b948a1b0e02d294b2bc9a4e59b086eec9162f8c2b7c9f38de428 not found: ID does not exist" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.564156 4854 scope.go:117] "RemoveContainer" containerID="e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254" Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.564477 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254\": container with ID starting with e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254 not found: ID does not exist" containerID="e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.564498 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254"} err="failed to get container status \"e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254\": rpc error: code = NotFound desc = could not find container \"e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254\": container with ID starting with e00bb924889ada06fb650c35567ed8f9d43660802607200e06ae1b0a4ce80254 not found: ID does not exist" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.602814 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.627998 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.628563 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="sg-core" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628582 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="sg-core" Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.628593 
4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-central-agent" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628601 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-central-agent" Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.628641 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="proxy-httpd" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628647 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="proxy-httpd" Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.628711 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-notification-agent" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628720 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-notification-agent" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628928 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-notification-agent" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628941 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="ceilometer-central-agent" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628952 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="sg-core" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.628967 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="10883200-ec1b-4bca-835d-a30490040779" containerName="proxy-httpd" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.631509 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.634780 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.634927 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.664136 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.734913 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.740607 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmff6\" (UniqueName: \"kubernetes.io/projected/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-kube-api-access-tmff6\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.740732 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-config-data\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.740865 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-run-httpd\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.740891 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.740952 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-scripts\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.740973 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.740997 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-log-httpd\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.830461 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:07 crc kubenswrapper[4854]: E1125 10:01:07.831699 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-tmff6 log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="935c8b5d-8fc1-4846-a5aa-6794a4833d9d" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.842957 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-config-data\") pod \"ee7b8559-e810-42ef-99b7-e206a817fd29\" (UID: 
\"ee7b8559-e810-42ef-99b7-e206a817fd29\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843106 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtv97\" (UniqueName: \"kubernetes.io/projected/ee7b8559-e810-42ef-99b7-e206a817fd29-kube-api-access-wtv97\") pod \"ee7b8559-e810-42ef-99b7-e206a817fd29\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843250 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-fernet-keys\") pod \"ee7b8559-e810-42ef-99b7-e206a817fd29\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843315 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-combined-ca-bundle\") pod \"ee7b8559-e810-42ef-99b7-e206a817fd29\" (UID: \"ee7b8559-e810-42ef-99b7-e206a817fd29\") " Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843821 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-run-httpd\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843848 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843894 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-scripts\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843912 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.843929 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-log-httpd\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.844000 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmff6\" (UniqueName: \"kubernetes.io/projected/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-kube-api-access-tmff6\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.844061 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-config-data\") pod \"ceilometer-0\" (UID: 
\"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.844301 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-run-httpd\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.844630 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-log-httpd\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.851324 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.851552 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="a52007f7-ea68-422e-90d3-be11aa0184d5" containerName="kube-state-metrics" containerID="cri-o://abf7216f7246fc294d8ac4f76239a8fcc5bab668725dbe1e584de57ec01e0f2c" gracePeriod=30 Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.860100 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-scripts\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.889193 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ee7b8559-e810-42ef-99b7-e206a817fd29" (UID: "ee7b8559-e810-42ef-99b7-e206a817fd29"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.890284 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-config-data\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.891581 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.895767 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee7b8559-e810-42ef-99b7-e206a817fd29-kube-api-access-wtv97" (OuterVolumeSpecName: "kube-api-access-wtv97") pod "ee7b8559-e810-42ef-99b7-e206a817fd29" (UID: "ee7b8559-e810-42ef-99b7-e206a817fd29"). InnerVolumeSpecName "kube-api-access-wtv97". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.903509 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.910880 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmff6\" (UniqueName: \"kubernetes.io/projected/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-kube-api-access-tmff6\") pod \"ceilometer-0\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " pod="openstack/ceilometer-0" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.925057 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee7b8559-e810-42ef-99b7-e206a817fd29" (UID: "ee7b8559-e810-42ef-99b7-e206a817fd29"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.996358 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtv97\" (UniqueName: \"kubernetes.io/projected/ee7b8559-e810-42ef-99b7-e206a817fd29-kube-api-access-wtv97\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.996400 4854 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:07 crc kubenswrapper[4854]: I1125 10:01:07.996411 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.007270 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.007913 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="8a1342b2-9a0f-42fe-85f4-d706eb2587d4" containerName="mysqld-exporter" containerID="cri-o://847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c" gracePeriod=30 Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.007928 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-config-data" (OuterVolumeSpecName: "config-data") pod "ee7b8559-e810-42ef-99b7-e206a817fd29" (UID: "ee7b8559-e810-42ef-99b7-e206a817fd29"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.101181 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7b8559-e810-42ef-99b7-e206a817fd29-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.141050 4854 generic.go:334] "Generic (PLEG): container finished" podID="a52007f7-ea68-422e-90d3-be11aa0184d5" containerID="abf7216f7246fc294d8ac4f76239a8fcc5bab668725dbe1e584de57ec01e0f2c" exitCode=2 Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.141175 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a52007f7-ea68-422e-90d3-be11aa0184d5","Type":"ContainerDied","Data":"abf7216f7246fc294d8ac4f76239a8fcc5bab668725dbe1e584de57ec01e0f2c"} Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.149743 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401081-jn8kd" event={"ID":"ee7b8559-e810-42ef-99b7-e206a817fd29","Type":"ContainerDied","Data":"3e321a93e08fdcf0fb0c6fa47851b927831fa4e9527fddf2bbc3ab0eae95a9f2"} Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.149799 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e321a93e08fdcf0fb0c6fa47851b927831fa4e9527fddf2bbc3ab0eae95a9f2" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.149890 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401081-jn8kd" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.163870 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.188797 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.306550 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-sg-core-conf-yaml\") pod \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.306992 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-scripts\") pod \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.307046 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-run-httpd\") pod \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.307107 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-combined-ca-bundle\") pod \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.307170 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-config-data\") pod \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.307274 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-log-httpd\") pod \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.307333 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmff6\" (UniqueName: \"kubernetes.io/projected/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-kube-api-access-tmff6\") pod \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\" (UID: \"935c8b5d-8fc1-4846-a5aa-6794a4833d9d\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.308403 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "935c8b5d-8fc1-4846-a5aa-6794a4833d9d" (UID: "935c8b5d-8fc1-4846-a5aa-6794a4833d9d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.315471 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-kube-api-access-tmff6" (OuterVolumeSpecName: "kube-api-access-tmff6") pod "935c8b5d-8fc1-4846-a5aa-6794a4833d9d" (UID: "935c8b5d-8fc1-4846-a5aa-6794a4833d9d"). InnerVolumeSpecName "kube-api-access-tmff6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.318101 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "935c8b5d-8fc1-4846-a5aa-6794a4833d9d" (UID: "935c8b5d-8fc1-4846-a5aa-6794a4833d9d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.319008 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-scripts" (OuterVolumeSpecName: "scripts") pod "935c8b5d-8fc1-4846-a5aa-6794a4833d9d" (UID: "935c8b5d-8fc1-4846-a5aa-6794a4833d9d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.322419 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-config-data" (OuterVolumeSpecName: "config-data") pod "935c8b5d-8fc1-4846-a5aa-6794a4833d9d" (UID: "935c8b5d-8fc1-4846-a5aa-6794a4833d9d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.322761 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "935c8b5d-8fc1-4846-a5aa-6794a4833d9d" (UID: "935c8b5d-8fc1-4846-a5aa-6794a4833d9d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.323898 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "935c8b5d-8fc1-4846-a5aa-6794a4833d9d" (UID: "935c8b5d-8fc1-4846-a5aa-6794a4833d9d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.347751 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.247:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.348445 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.247:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.411050 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.411090 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.411103 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.411117 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.411127 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.411138 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmff6\" (UniqueName: \"kubernetes.io/projected/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-kube-api-access-tmff6\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.411149 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/935c8b5d-8fc1-4846-a5aa-6794a4833d9d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.754757 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.925461 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5t8s\" (UniqueName: \"kubernetes.io/projected/a52007f7-ea68-422e-90d3-be11aa0184d5-kube-api-access-w5t8s\") pod \"a52007f7-ea68-422e-90d3-be11aa0184d5\" (UID: \"a52007f7-ea68-422e-90d3-be11aa0184d5\") " Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.933834 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a52007f7-ea68-422e-90d3-be11aa0184d5-kube-api-access-w5t8s" (OuterVolumeSpecName: "kube-api-access-w5t8s") pod "a52007f7-ea68-422e-90d3-be11aa0184d5" (UID: "a52007f7-ea68-422e-90d3-be11aa0184d5"). 
InnerVolumeSpecName "kube-api-access-w5t8s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:08 crc kubenswrapper[4854]: I1125 10:01:08.955408 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.030411 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5t8s\" (UniqueName: \"kubernetes.io/projected/a52007f7-ea68-422e-90d3-be11aa0184d5-kube-api-access-w5t8s\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.060015 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10883200-ec1b-4bca-835d-a30490040779" path="/var/lib/kubelet/pods/10883200-ec1b-4bca-835d-a30490040779/volumes" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.132412 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-combined-ca-bundle\") pod \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.132557 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bvsj\" (UniqueName: \"kubernetes.io/projected/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-kube-api-access-8bvsj\") pod \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.132661 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-config-data\") pod \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\" (UID: \"8a1342b2-9a0f-42fe-85f4-d706eb2587d4\") " Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.139082 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-kube-api-access-8bvsj" (OuterVolumeSpecName: "kube-api-access-8bvsj") pod "8a1342b2-9a0f-42fe-85f4-d706eb2587d4" (UID: "8a1342b2-9a0f-42fe-85f4-d706eb2587d4"). InnerVolumeSpecName "kube-api-access-8bvsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.186027 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a1342b2-9a0f-42fe-85f4-d706eb2587d4" (UID: "8a1342b2-9a0f-42fe-85f4-d706eb2587d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.188980 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a52007f7-ea68-422e-90d3-be11aa0184d5","Type":"ContainerDied","Data":"3d7a10d837a3a4f4751178cc7e6b621048bc97c7bbc1d3068336eec6f2ba8cdb"} Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.189026 4854 scope.go:117] "RemoveContainer" containerID="abf7216f7246fc294d8ac4f76239a8fcc5bab668725dbe1e584de57ec01e0f2c" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.189160 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.191618 4854 generic.go:334] "Generic (PLEG): container finished" podID="8a1342b2-9a0f-42fe-85f4-d706eb2587d4" containerID="847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c" exitCode=2 Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.191706 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.192147 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.192203 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"8a1342b2-9a0f-42fe-85f4-d706eb2587d4","Type":"ContainerDied","Data":"847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c"} Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.192232 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"8a1342b2-9a0f-42fe-85f4-d706eb2587d4","Type":"ContainerDied","Data":"a6710b883700dd6b7b4ce3f4d9543f074cd0a42930bb7c7a1f0ca20c70859ece"} Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.236177 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.236214 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bvsj\" (UniqueName: \"kubernetes.io/projected/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-kube-api-access-8bvsj\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.260111 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-config-data" (OuterVolumeSpecName: "config-data") pod "8a1342b2-9a0f-42fe-85f4-d706eb2587d4" (UID: "8a1342b2-9a0f-42fe-85f4-d706eb2587d4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.305581 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.321053 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.339571 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a1342b2-9a0f-42fe-85f4-d706eb2587d4-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.351726 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.373843 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.387199 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: E1125 10:01:09.387770 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a1342b2-9a0f-42fe-85f4-d706eb2587d4" containerName="mysqld-exporter" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.387788 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a1342b2-9a0f-42fe-85f4-d706eb2587d4" containerName="mysqld-exporter" Nov 25 10:01:09 crc kubenswrapper[4854]: E1125 10:01:09.387831 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a52007f7-ea68-422e-90d3-be11aa0184d5" containerName="kube-state-metrics" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.387840 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a52007f7-ea68-422e-90d3-be11aa0184d5" containerName="kube-state-metrics" Nov 25 10:01:09 crc kubenswrapper[4854]: E1125 10:01:09.387872 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7b8559-e810-42ef-99b7-e206a817fd29" containerName="keystone-cron" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.387879 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7b8559-e810-42ef-99b7-e206a817fd29" containerName="keystone-cron" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.389010 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a1342b2-9a0f-42fe-85f4-d706eb2587d4" containerName="mysqld-exporter" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.389131 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee7b8559-e810-42ef-99b7-e206a817fd29" containerName="keystone-cron" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.389293 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a52007f7-ea68-422e-90d3-be11aa0184d5" containerName="kube-state-metrics" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.394213 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.398439 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.399910 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.401656 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.421772 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.423956 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.428664 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.429064 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.437575 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.542701 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546143 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546326 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-config-data\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546454 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-log-httpd\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546493 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546582 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-run-httpd\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546609 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546661 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch5sr\" (UniqueName: \"kubernetes.io/projected/cc627576-3561-41b8-9d76-f69680c1012a-kube-api-access-ch5sr\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546944 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zsxk\" (UniqueName: \"kubernetes.io/projected/fc1dc61b-323f-46bd-a462-1c60e8d45948-kube-api-access-2zsxk\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.546978 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-scripts\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.547012 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.547051 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.565724 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.578538 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.580562 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.585048 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.585148 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.592207 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650350 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-log-httpd\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650413 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650489 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-run-httpd\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650512 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650559 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch5sr\" (UniqueName: \"kubernetes.io/projected/cc627576-3561-41b8-9d76-f69680c1012a-kube-api-access-ch5sr\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650596 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zsxk\" (UniqueName: \"kubernetes.io/projected/fc1dc61b-323f-46bd-a462-1c60e8d45948-kube-api-access-2zsxk\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650621 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-scripts\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650648 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650778 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.650954 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.651087 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-config-data\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.653391 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-log-httpd\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.654712 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-run-httpd\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.657214 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.658226 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-config-data\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.659217 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.659970 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-scripts\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.662444 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc 
kubenswrapper[4854]: I1125 10:01:09.666589 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.668139 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch5sr\" (UniqueName: \"kubernetes.io/projected/cc627576-3561-41b8-9d76-f69680c1012a-kube-api-access-ch5sr\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.673233 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc627576-3561-41b8-9d76-f69680c1012a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"cc627576-3561-41b8-9d76-f69680c1012a\") " pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.675542 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zsxk\" (UniqueName: \"kubernetes.io/projected/fc1dc61b-323f-46bd-a462-1c60e8d45948-kube-api-access-2zsxk\") pod \"ceilometer-0\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.753009 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btwm2\" (UniqueName: \"kubernetes.io/projected/35ef28aa-b004-4616-8de8-0a88444ab5f2-kube-api-access-btwm2\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.753332 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.753483 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.753502 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-config-data\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.768206 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.782078 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.855596 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.855653 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-config-data\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.855796 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btwm2\" (UniqueName: \"kubernetes.io/projected/35ef28aa-b004-4616-8de8-0a88444ab5f2-kube-api-access-btwm2\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.855867 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.859982 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.859982 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.865490 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35ef28aa-b004-4616-8de8-0a88444ab5f2-config-data\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.883271 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btwm2\" (UniqueName: \"kubernetes.io/projected/35ef28aa-b004-4616-8de8-0a88444ab5f2-kube-api-access-btwm2\") pod \"mysqld-exporter-0\" (UID: \"35ef28aa-b004-4616-8de8-0a88444ab5f2\") " pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.904352 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 10:01:09 crc kubenswrapper[4854]: I1125 10:01:09.996829 4854 scope.go:117] "RemoveContainer" containerID="847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c" Nov 25 10:01:10 crc kubenswrapper[4854]: I1125 10:01:10.253869 4854 scope.go:117] "RemoveContainer" containerID="847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c" Nov 25 10:01:10 crc kubenswrapper[4854]: E1125 10:01:10.254375 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c\": container with ID starting with 847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c not found: ID does not exist" containerID="847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c" Nov 25 10:01:10 crc kubenswrapper[4854]: I1125 10:01:10.254402 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c"} err="failed to get container status \"847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c\": rpc error: code = NotFound desc = could not find container \"847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c\": container with ID starting with 847bbab51518ae8e9b903c06b8d6d932fd6de9eec6bba9ff7eaa2caef4c61e1c not found: ID does not exist" Nov 25 10:01:10 crc kubenswrapper[4854]: I1125 10:01:10.989079 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.004480 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.044230 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a1342b2-9a0f-42fe-85f4-d706eb2587d4" path="/var/lib/kubelet/pods/8a1342b2-9a0f-42fe-85f4-d706eb2587d4/volumes" Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.045954 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="935c8b5d-8fc1-4846-a5aa-6794a4833d9d" path="/var/lib/kubelet/pods/935c8b5d-8fc1-4846-a5aa-6794a4833d9d/volumes" Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.046857 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a52007f7-ea68-422e-90d3-be11aa0184d5" path="/var/lib/kubelet/pods/a52007f7-ea68-422e-90d3-be11aa0184d5/volumes" Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.131900 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.280411 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"35ef28aa-b004-4616-8de8-0a88444ab5f2","Type":"ContainerStarted","Data":"47de64b7b1971f9d87326f31497fef7657db3dbdd300a68eff3393ca9eff7264"} Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.282562 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cc627576-3561-41b8-9d76-f69680c1012a","Type":"ContainerStarted","Data":"f29029da9037436305acf488646bf3f7c2d1a9ed5c1b55483fc40b84e213f1a5"} Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.284341 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerStarted","Data":"a8bba6561e77700dbd4d438cc5d2d41e4f36664c9184673b4a3a31d6f7fa9ffb"} Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.287326 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerStarted","Data":"532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f"} Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.287505 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-api" containerID="cri-o://61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01" gracePeriod=30 Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.288263 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-listener" containerID="cri-o://532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f" gracePeriod=30 Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.288334 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-notifier" containerID="cri-o://7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6" gracePeriod=30 Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.288393 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-evaluator" containerID="cri-o://faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a" gracePeriod=30 Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.333214 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.360319407 podStartE2EDuration="11.333190068s" podCreationTimestamp="2025-11-25 10:01:00 +0000 UTC" firstStartedPulling="2025-11-25 10:01:01.351055533 +0000 UTC m=+1467.204048909" lastFinishedPulling="2025-11-25 10:01:10.323926194 +0000 UTC m=+1476.176919570" observedRunningTime="2025-11-25 10:01:11.321210597 +0000 UTC m=+1477.174203983" watchObservedRunningTime="2025-11-25 10:01:11.333190068 +0000 UTC m=+1477.186183444" Nov 25 10:01:11 crc kubenswrapper[4854]: I1125 10:01:11.994302 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.312380 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"35ef28aa-b004-4616-8de8-0a88444ab5f2","Type":"ContainerStarted","Data":"23a508694e8c92bc7ff55426b54e6e065b2d4a033d2bdebaf8d047c50dba2a00"} Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.315974 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cc627576-3561-41b8-9d76-f69680c1012a","Type":"ContainerStarted","Data":"6ba453258c80e6db6cc85bc61555a5995f27f77242f6b997f012c7b27dad21bf"} Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.316102 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.325039 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerStarted","Data":"342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33"} Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.331426 4854 generic.go:334] "Generic (PLEG): container finished" podID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerID="faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a" exitCode=0 Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.331462 4854 generic.go:334] "Generic (PLEG): container finished" podID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerID="61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01" exitCode=0 Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.331488 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerDied","Data":"faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a"} Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.331518 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerDied","Data":"61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01"} Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.346550 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.756829209 podStartE2EDuration="3.346532285s" podCreationTimestamp="2025-11-25 10:01:09 +0000 UTC" firstStartedPulling="2025-11-25 10:01:10.9953274 +0000 UTC m=+1476.848320776" lastFinishedPulling="2025-11-25 10:01:11.585030466 +0000 UTC m=+1477.438023852" observedRunningTime="2025-11-25 10:01:12.343919533 +0000 UTC m=+1478.196912909" watchObservedRunningTime="2025-11-25 10:01:12.346532285 +0000 UTC m=+1478.199525661" Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.375923 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.967296421 podStartE2EDuration="3.375897597s" podCreationTimestamp="2025-11-25 10:01:09 +0000 UTC" firstStartedPulling="2025-11-25 10:01:11.000376089 +0000 UTC m=+1476.853369475" lastFinishedPulling="2025-11-25 10:01:11.408977275 +0000 UTC m=+1477.261970651" observedRunningTime="2025-11-25 10:01:12.375233799 +0000 UTC m=+1478.228227165" watchObservedRunningTime="2025-11-25 10:01:12.375897597 +0000 UTC m=+1478.228890993" Nov 25 10:01:12 crc kubenswrapper[4854]: I1125 10:01:12.500367 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 25 10:01:13 crc kubenswrapper[4854]: I1125 10:01:13.360203 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerStarted","Data":"1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98"} Nov 25 10:01:13 crc kubenswrapper[4854]: I1125 10:01:13.366102 4854 generic.go:334] "Generic (PLEG): container finished" podID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerID="7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6" exitCode=0 Nov 25 10:01:13 crc kubenswrapper[4854]: I1125 10:01:13.367196 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerDied","Data":"7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6"} Nov 25 10:01:14 crc kubenswrapper[4854]: I1125 
10:01:14.379867 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerStarted","Data":"612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5"} Nov 25 10:01:16 crc kubenswrapper[4854]: I1125 10:01:16.426473 4854 generic.go:334] "Generic (PLEG): container finished" podID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerID="4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349" exitCode=1 Nov 25 10:01:16 crc kubenswrapper[4854]: I1125 10:01:16.426965 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-central-agent" containerID="cri-o://342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33" gracePeriod=30 Nov 25 10:01:16 crc kubenswrapper[4854]: I1125 10:01:16.427164 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerDied","Data":"4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349"} Nov 25 10:01:16 crc kubenswrapper[4854]: I1125 10:01:16.427746 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="sg-core" containerID="cri-o://612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5" gracePeriod=30 Nov 25 10:01:16 crc kubenswrapper[4854]: I1125 10:01:16.427882 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-notification-agent" containerID="cri-o://1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98" gracePeriod=30 Nov 25 10:01:16 crc kubenswrapper[4854]: E1125 10:01:16.475463 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc1dc61b_323f_46bd_a462_1c60e8d45948.slice/crio-4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc1dc61b_323f_46bd_a462_1c60e8d45948.slice/crio-conmon-4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache]" Nov 25 10:01:16 crc kubenswrapper[4854]: E1125 10:01:16.492093 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 
10:01:17.265869 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.266545 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.270240 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.270715 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.451969 4854 generic.go:334] "Generic (PLEG): container finished" podID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerID="612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5" exitCode=2 Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.452000 4854 generic.go:334] "Generic (PLEG): container finished" podID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerID="1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98" exitCode=0 Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.453297 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerDied","Data":"612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5"} Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.453377 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerDied","Data":"1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98"} Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.453406 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.458269 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.651006 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-gzlbt"] Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.653576 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.677427 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-gzlbt"] Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.796149 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.796206 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-config\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.796572 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.796893 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmsjh\" (UniqueName: \"kubernetes.io/projected/b9e714ae-48fc-4b5a-97b5-faa9a902e431-kube-api-access-gmsjh\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.796972 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.797022 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.898936 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.899125 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmsjh\" (UniqueName: \"kubernetes.io/projected/b9e714ae-48fc-4b5a-97b5-faa9a902e431-kube-api-access-gmsjh\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.899163 4854 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.899185 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.899256 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.899311 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-config\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.899999 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.900070 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-config\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.900185 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.900289 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.900360 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.917616 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmsjh\" (UniqueName: \"kubernetes.io/projected/b9e714ae-48fc-4b5a-97b5-faa9a902e431-kube-api-access-gmsjh\") 
pod \"dnsmasq-dns-f84f9ccf-gzlbt\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:17 crc kubenswrapper[4854]: I1125 10:01:17.984898 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:18 crc kubenswrapper[4854]: I1125 10:01:18.596089 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-gzlbt"] Nov 25 10:01:19 crc kubenswrapper[4854]: I1125 10:01:19.499489 4854 generic.go:334] "Generic (PLEG): container finished" podID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerID="42b8951e042daee0ad7de783340763a95f09a13dc766c228bd858b7335b14bf5" exitCode=0 Nov 25 10:01:19 crc kubenswrapper[4854]: I1125 10:01:19.500693 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" event={"ID":"b9e714ae-48fc-4b5a-97b5-faa9a902e431","Type":"ContainerDied","Data":"42b8951e042daee0ad7de783340763a95f09a13dc766c228bd858b7335b14bf5"} Nov 25 10:01:19 crc kubenswrapper[4854]: I1125 10:01:19.500729 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" event={"ID":"b9e714ae-48fc-4b5a-97b5-faa9a902e431","Type":"ContainerStarted","Data":"63ba9027cd9b047ca15b15ac4e5aff4cddf08f32efe5b355c786619e1df3b9f6"} Nov 25 10:01:19 crc kubenswrapper[4854]: I1125 10:01:19.794702 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 10:01:20 crc kubenswrapper[4854]: I1125 10:01:20.348866 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:20 crc kubenswrapper[4854]: I1125 10:01:20.513338 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" event={"ID":"b9e714ae-48fc-4b5a-97b5-faa9a902e431","Type":"ContainerStarted","Data":"64a53c29fbd1aeaf9a1fa6e01b49af054dddfb813c762a8e7658256c5b35821d"} Nov 25 10:01:20 crc kubenswrapper[4854]: I1125 10:01:20.513491 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-log" containerID="cri-o://e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a" gracePeriod=30 Nov 25 10:01:20 crc kubenswrapper[4854]: I1125 10:01:20.513573 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-api" containerID="cri-o://e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a" gracePeriod=30 Nov 25 10:01:20 crc kubenswrapper[4854]: I1125 10:01:20.540039 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" podStartSLOduration=3.5400204520000003 podStartE2EDuration="3.540020452s" podCreationTimestamp="2025-11-25 10:01:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:20.537078791 +0000 UTC m=+1486.390072187" watchObservedRunningTime="2025-11-25 10:01:20.540020452 +0000 UTC m=+1486.393013828" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.271141 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.382988 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-combined-ca-bundle\") pod \"fc1dc61b-323f-46bd-a462-1c60e8d45948\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383105 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-run-httpd\") pod \"fc1dc61b-323f-46bd-a462-1c60e8d45948\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383126 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-sg-core-conf-yaml\") pod \"fc1dc61b-323f-46bd-a462-1c60e8d45948\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383289 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zsxk\" (UniqueName: \"kubernetes.io/projected/fc1dc61b-323f-46bd-a462-1c60e8d45948-kube-api-access-2zsxk\") pod \"fc1dc61b-323f-46bd-a462-1c60e8d45948\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383324 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-log-httpd\") pod \"fc1dc61b-323f-46bd-a462-1c60e8d45948\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383432 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-config-data\") pod \"fc1dc61b-323f-46bd-a462-1c60e8d45948\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383485 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-scripts\") pod \"fc1dc61b-323f-46bd-a462-1c60e8d45948\" (UID: \"fc1dc61b-323f-46bd-a462-1c60e8d45948\") " Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383500 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fc1dc61b-323f-46bd-a462-1c60e8d45948" (UID: "fc1dc61b-323f-46bd-a462-1c60e8d45948"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.383755 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fc1dc61b-323f-46bd-a462-1c60e8d45948" (UID: "fc1dc61b-323f-46bd-a462-1c60e8d45948"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.384049 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.384063 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc1dc61b-323f-46bd-a462-1c60e8d45948-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.394269 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-scripts" (OuterVolumeSpecName: "scripts") pod "fc1dc61b-323f-46bd-a462-1c60e8d45948" (UID: "fc1dc61b-323f-46bd-a462-1c60e8d45948"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.429358 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc1dc61b-323f-46bd-a462-1c60e8d45948-kube-api-access-2zsxk" (OuterVolumeSpecName: "kube-api-access-2zsxk") pod "fc1dc61b-323f-46bd-a462-1c60e8d45948" (UID: "fc1dc61b-323f-46bd-a462-1c60e8d45948"). InnerVolumeSpecName "kube-api-access-2zsxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.474952 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fc1dc61b-323f-46bd-a462-1c60e8d45948" (UID: "fc1dc61b-323f-46bd-a462-1c60e8d45948"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.492598 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zsxk\" (UniqueName: \"kubernetes.io/projected/fc1dc61b-323f-46bd-a462-1c60e8d45948-kube-api-access-2zsxk\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.492637 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.492649 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.535273 4854 generic.go:334] "Generic (PLEG): container finished" podID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerID="342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33" exitCode=0 Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.535340 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerDied","Data":"342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33"} Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.535369 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc1dc61b-323f-46bd-a462-1c60e8d45948","Type":"ContainerDied","Data":"a8bba6561e77700dbd4d438cc5d2d41e4f36664c9184673b4a3a31d6f7fa9ffb"} Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.535385 4854 scope.go:117] "RemoveContainer" containerID="4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.535538 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.537103 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc1dc61b-323f-46bd-a462-1c60e8d45948" (UID: "fc1dc61b-323f-46bd-a462-1c60e8d45948"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.542591 4854 generic.go:334] "Generic (PLEG): container finished" podID="9064c524-7e0e-4327-ae10-b531d78c450b" containerID="e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a" exitCode=143 Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.542982 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9064c524-7e0e-4327-ae10-b531d78c450b","Type":"ContainerDied","Data":"e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a"} Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.543971 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.594488 4854 scope.go:117] "RemoveContainer" containerID="612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.596417 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.615501 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-config-data" (OuterVolumeSpecName: "config-data") pod "fc1dc61b-323f-46bd-a462-1c60e8d45948" (UID: "fc1dc61b-323f-46bd-a462-1c60e8d45948"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.650305 4854 scope.go:117] "RemoveContainer" containerID="1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.700113 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc1dc61b-323f-46bd-a462-1c60e8d45948-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.711588 4854 scope.go:117] "RemoveContainer" containerID="342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.737332 4854 scope.go:117] "RemoveContainer" containerID="4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349" Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.737724 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349\": container with ID starting with 4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349 not found: ID does not exist" containerID="4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.737780 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349"} err="failed to get container status \"4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349\": rpc error: code = NotFound desc = could not find container \"4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349\": container with ID starting with 4153d925cd534d617a89f56b517b9b3d227d0167229c0f518816355806ffe349 not found: ID does not exist" Nov 25 10:01:21 crc kubenswrapper[4854]: 
I1125 10:01:21.737802 4854 scope.go:117] "RemoveContainer" containerID="612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5" Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.738215 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5\": container with ID starting with 612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5 not found: ID does not exist" containerID="612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.738260 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5"} err="failed to get container status \"612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5\": rpc error: code = NotFound desc = could not find container \"612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5\": container with ID starting with 612c939145e4b538a56369bd165356270a62e73e282819143e4310dae89718e5 not found: ID does not exist" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.738289 4854 scope.go:117] "RemoveContainer" containerID="1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98" Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.738706 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98\": container with ID starting with 1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98 not found: ID does not exist" containerID="1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.738732 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98"} err="failed to get container status \"1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98\": rpc error: code = NotFound desc = could not find container \"1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98\": container with ID starting with 1d5404b21bcdcb0d26e7f6b628fd620713bf92f4ae4b73175401ac0e9a892d98 not found: ID does not exist" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.738745 4854 scope.go:117] "RemoveContainer" containerID="342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33" Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.739243 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33\": container with ID starting with 342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33 not found: ID does not exist" containerID="342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.739275 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33"} err="failed to get container status \"342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33\": rpc error: code = NotFound desc = could not find container \"342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33\": container 
with ID starting with 342c523ce2d57df47a3ec08111f07e00e2f4b77bd80f618cf9e7ec7e06331b33 not found: ID does not exist" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.878060 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.893811 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.914126 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.914757 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="sg-core" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.914783 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="sg-core" Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.914801 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-central-agent" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.914810 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-central-agent" Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.914826 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-notification-agent" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.914833 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-notification-agent" Nov 25 10:01:21 crc kubenswrapper[4854]: E1125 10:01:21.914842 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="proxy-httpd" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.914849 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="proxy-httpd" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.915098 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="sg-core" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.915121 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="proxy-httpd" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.915135 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-central-agent" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.915161 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" containerName="ceilometer-notification-agent" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.917615 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.920063 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.920421 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.923427 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:01:21 crc kubenswrapper[4854]: I1125 10:01:21.937512 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007283 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007317 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-log-httpd\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007452 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-scripts\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007491 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007515 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-run-httpd\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007552 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv898\" (UniqueName: \"kubernetes.io/projected/709d7464-52a2-4fe9-a323-07839ae3d12d-kube-api-access-nv898\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007571 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-config-data\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.007624 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.110784 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.110851 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-log-httpd\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.111006 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-scripts\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.111065 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.111090 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-run-httpd\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.111126 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv898\" (UniqueName: \"kubernetes.io/projected/709d7464-52a2-4fe9-a323-07839ae3d12d-kube-api-access-nv898\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.111151 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-config-data\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.111252 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.113038 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-run-httpd\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.114243 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-log-httpd\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.120431 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.124471 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.124648 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.124772 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-scripts\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.125272 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-config-data\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.132656 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv898\" (UniqueName: \"kubernetes.io/projected/709d7464-52a2-4fe9-a323-07839ae3d12d-kube-api-access-nv898\") pod \"ceilometer-0\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.240217 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.712136 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:22 crc kubenswrapper[4854]: I1125 10:01:22.785917 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.042474 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc1dc61b-323f-46bd-a462-1c60e8d45948" path="/var/lib/kubelet/pods/fc1dc61b-323f-46bd-a462-1c60e8d45948/volumes" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.365527 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.385309 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.442823 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-combined-ca-bundle\") pod \"97e394d1-03c3-467a-84b3-daf51739d393\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.442920 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97e394d1-03c3-467a-84b3-daf51739d393-logs\") pod \"97e394d1-03c3-467a-84b3-daf51739d393\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.443046 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jbt4\" (UniqueName: \"kubernetes.io/projected/97e394d1-03c3-467a-84b3-daf51739d393-kube-api-access-2jbt4\") pod \"97e394d1-03c3-467a-84b3-daf51739d393\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.443110 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-combined-ca-bundle\") pod \"2784a16c-11ca-4472-bd50-f33dfb6f1086\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.443133 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-config-data\") pod \"97e394d1-03c3-467a-84b3-daf51739d393\" (UID: \"97e394d1-03c3-467a-84b3-daf51739d393\") " Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.443222 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tthxv\" (UniqueName: \"kubernetes.io/projected/2784a16c-11ca-4472-bd50-f33dfb6f1086-kube-api-access-tthxv\") pod \"2784a16c-11ca-4472-bd50-f33dfb6f1086\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.443356 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97e394d1-03c3-467a-84b3-daf51739d393-logs" (OuterVolumeSpecName: "logs") pod "97e394d1-03c3-467a-84b3-daf51739d393" (UID: "97e394d1-03c3-467a-84b3-daf51739d393"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.443392 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-config-data\") pod \"2784a16c-11ca-4472-bd50-f33dfb6f1086\" (UID: \"2784a16c-11ca-4472-bd50-f33dfb6f1086\") " Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.444144 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97e394d1-03c3-467a-84b3-daf51739d393-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.449203 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97e394d1-03c3-467a-84b3-daf51739d393-kube-api-access-2jbt4" (OuterVolumeSpecName: "kube-api-access-2jbt4") pod "97e394d1-03c3-467a-84b3-daf51739d393" (UID: "97e394d1-03c3-467a-84b3-daf51739d393"). InnerVolumeSpecName "kube-api-access-2jbt4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.449358 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2784a16c-11ca-4472-bd50-f33dfb6f1086-kube-api-access-tthxv" (OuterVolumeSpecName: "kube-api-access-tthxv") pod "2784a16c-11ca-4472-bd50-f33dfb6f1086" (UID: "2784a16c-11ca-4472-bd50-f33dfb6f1086"). InnerVolumeSpecName "kube-api-access-tthxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.489483 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97e394d1-03c3-467a-84b3-daf51739d393" (UID: "97e394d1-03c3-467a-84b3-daf51739d393"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.499762 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-config-data" (OuterVolumeSpecName: "config-data") pod "97e394d1-03c3-467a-84b3-daf51739d393" (UID: "97e394d1-03c3-467a-84b3-daf51739d393"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.500299 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-config-data" (OuterVolumeSpecName: "config-data") pod "2784a16c-11ca-4472-bd50-f33dfb6f1086" (UID: "2784a16c-11ca-4472-bd50-f33dfb6f1086"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.503398 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2784a16c-11ca-4472-bd50-f33dfb6f1086" (UID: "2784a16c-11ca-4472-bd50-f33dfb6f1086"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.546995 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.547053 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jbt4\" (UniqueName: \"kubernetes.io/projected/97e394d1-03c3-467a-84b3-daf51739d393-kube-api-access-2jbt4\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.547077 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.547097 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97e394d1-03c3-467a-84b3-daf51739d393-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.547115 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tthxv\" (UniqueName: \"kubernetes.io/projected/2784a16c-11ca-4472-bd50-f33dfb6f1086-kube-api-access-tthxv\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.547132 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2784a16c-11ca-4472-bd50-f33dfb6f1086-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.578444 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerStarted","Data":"1362a87b081619af6dffcf73233e4d02a63af60463d3b7171e8c066acbe1a3d6"} Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.578654 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerStarted","Data":"3f171cfad85b0e9666f17d073b403bb6a2ae9c3b31ef91394141ad7e7b9d9676"} Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.580916 4854 generic.go:334] "Generic (PLEG): container finished" podID="97e394d1-03c3-467a-84b3-daf51739d393" containerID="7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a" exitCode=137 Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.580966 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97e394d1-03c3-467a-84b3-daf51739d393","Type":"ContainerDied","Data":"7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a"} Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.580983 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97e394d1-03c3-467a-84b3-daf51739d393","Type":"ContainerDied","Data":"b086e17b9aaa0b5ce9639643f8c059034f1167635aca0c18d2d2b7740dbae72a"} Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.580997 4854 scope.go:117] "RemoveContainer" containerID="7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.581118 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.587069 4854 generic.go:334] "Generic (PLEG): container finished" podID="2784a16c-11ca-4472-bd50-f33dfb6f1086" containerID="7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795" exitCode=137 Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.587181 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2784a16c-11ca-4472-bd50-f33dfb6f1086","Type":"ContainerDied","Data":"7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795"} Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.587260 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2784a16c-11ca-4472-bd50-f33dfb6f1086","Type":"ContainerDied","Data":"9f7246ec141b8d1bc4971385d65f625a4aed358e2c6c9ebf25406c9e83bf6a29"} Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.587348 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.608766 4854 scope.go:117] "RemoveContainer" containerID="1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.644614 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.651315 4854 scope.go:117] "RemoveContainer" containerID="7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a" Nov 25 10:01:23 crc kubenswrapper[4854]: E1125 10:01:23.654786 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a\": container with ID starting with 7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a not found: ID does not exist" containerID="7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.654832 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a"} err="failed to get container status \"7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a\": rpc error: code = NotFound desc = could not find container \"7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a\": container with ID starting with 7766c365acdfbb46241f84834fd23ee08d973f52c05db08dbc9a32fb2ded376a not found: ID does not exist" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.654856 4854 scope.go:117] "RemoveContainer" containerID="1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a" Nov 25 10:01:23 crc kubenswrapper[4854]: E1125 10:01:23.655474 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a\": container with ID starting with 1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a not found: ID does not exist" containerID="1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.655501 4854 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a"} err="failed to get container status \"1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a\": rpc error: code = NotFound desc = could not find container \"1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a\": container with ID starting with 1a339f85a50186188f248e634fa59dbc2d7892af64037b2a96423d487df4076a not found: ID does not exist" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.655516 4854 scope.go:117] "RemoveContainer" containerID="7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.666882 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.694656 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: E1125 10:01:23.695237 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-metadata" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.695250 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-metadata" Nov 25 10:01:23 crc kubenswrapper[4854]: E1125 10:01:23.695263 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-log" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.695269 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-log" Nov 25 10:01:23 crc kubenswrapper[4854]: E1125 10:01:23.695302 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2784a16c-11ca-4472-bd50-f33dfb6f1086" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.695309 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2784a16c-11ca-4472-bd50-f33dfb6f1086" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.695536 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-log" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.695552 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2784a16c-11ca-4472-bd50-f33dfb6f1086" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.695564 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="97e394d1-03c3-467a-84b3-daf51739d393" containerName="nova-metadata-metadata" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.696930 4854 scope.go:117] "RemoveContainer" containerID="7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.696965 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: E1125 10:01:23.699928 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795\": container with ID starting with 7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795 not found: ID does not exist" containerID="7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.699984 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795"} err="failed to get container status \"7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795\": rpc error: code = NotFound desc = could not find container \"7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795\": container with ID starting with 7b82f2faada8418d8877a74fa0bf4a4597bd1b06d2fecea671ecf522e2717795 not found: ID does not exist" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.703059 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.703314 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.709152 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.725344 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.737323 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.749466 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.751056 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.753970 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-config-data\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.754014 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.754061 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.754083 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjsrm\" (UniqueName: \"kubernetes.io/projected/376f9383-a48b-42f0-be32-cf24789335d8-kube-api-access-hjsrm\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.754152 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/376f9383-a48b-42f0-be32-cf24789335d8-logs\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.754932 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.755482 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.755548 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.763448 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.858528 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.858635 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnpl4\" (UniqueName: \"kubernetes.io/projected/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-kube-api-access-wnpl4\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 
10:01:23.858741 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.858802 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.858906 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-config-data\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.859803 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.860132 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.860160 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.860180 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjsrm\" (UniqueName: \"kubernetes.io/projected/376f9383-a48b-42f0-be32-cf24789335d8-kube-api-access-hjsrm\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.860684 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/376f9383-a48b-42f0-be32-cf24789335d8-logs\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.861139 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/376f9383-a48b-42f0-be32-cf24789335d8-logs\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.865002 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.865224 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-config-data\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.868389 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.876972 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjsrm\" (UniqueName: \"kubernetes.io/projected/376f9383-a48b-42f0-be32-cf24789335d8-kube-api-access-hjsrm\") pod \"nova-metadata-0\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " pod="openstack/nova-metadata-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.963478 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.963535 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnpl4\" (UniqueName: \"kubernetes.io/projected/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-kube-api-access-wnpl4\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.963574 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.963611 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.963752 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.971582 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.972145 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.972352 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.975146 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:23 crc kubenswrapper[4854]: I1125 10:01:23.989865 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnpl4\" (UniqueName: \"kubernetes.io/projected/fe4a1f25-2ac0-4fa9-8abd-934878c41b2a-kube-api-access-wnpl4\") pod \"nova-cell1-novncproxy-0\" (UID: \"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.020905 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.092929 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.299607 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.372140 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zb9dw\" (UniqueName: \"kubernetes.io/projected/9064c524-7e0e-4327-ae10-b531d78c450b-kube-api-access-zb9dw\") pod \"9064c524-7e0e-4327-ae10-b531d78c450b\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.372254 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-config-data\") pod \"9064c524-7e0e-4327-ae10-b531d78c450b\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.372309 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9064c524-7e0e-4327-ae10-b531d78c450b-logs\") pod \"9064c524-7e0e-4327-ae10-b531d78c450b\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.372408 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-combined-ca-bundle\") pod \"9064c524-7e0e-4327-ae10-b531d78c450b\" (UID: \"9064c524-7e0e-4327-ae10-b531d78c450b\") " Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.373232 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9064c524-7e0e-4327-ae10-b531d78c450b-logs" (OuterVolumeSpecName: "logs") pod "9064c524-7e0e-4327-ae10-b531d78c450b" (UID: "9064c524-7e0e-4327-ae10-b531d78c450b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.379633 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9064c524-7e0e-4327-ae10-b531d78c450b-kube-api-access-zb9dw" (OuterVolumeSpecName: "kube-api-access-zb9dw") pod "9064c524-7e0e-4327-ae10-b531d78c450b" (UID: "9064c524-7e0e-4327-ae10-b531d78c450b"). InnerVolumeSpecName "kube-api-access-zb9dw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.404778 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9064c524-7e0e-4327-ae10-b531d78c450b" (UID: "9064c524-7e0e-4327-ae10-b531d78c450b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.410064 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-config-data" (OuterVolumeSpecName: "config-data") pod "9064c524-7e0e-4327-ae10-b531d78c450b" (UID: "9064c524-7e0e-4327-ae10-b531d78c450b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.475949 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zb9dw\" (UniqueName: \"kubernetes.io/projected/9064c524-7e0e-4327-ae10-b531d78c450b-kube-api-access-zb9dw\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.475972 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.475981 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9064c524-7e0e-4327-ae10-b531d78c450b-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.475989 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9064c524-7e0e-4327-ae10-b531d78c450b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.578526 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.611905 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerStarted","Data":"6712b217b2dda854e96bac8ff06d92e1386e16dc04b489f571ceb05955decbe7"} Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.625764 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"376f9383-a48b-42f0-be32-cf24789335d8","Type":"ContainerStarted","Data":"ea8a247c7b143769a034b223c9d1ad56a8d0b1527b01a2e1ae0915d030820e94"} Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.631477 4854 generic.go:334] "Generic (PLEG): container finished" podID="9064c524-7e0e-4327-ae10-b531d78c450b" containerID="e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a" exitCode=0 Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.631533 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9064c524-7e0e-4327-ae10-b531d78c450b","Type":"ContainerDied","Data":"e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a"} Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.631564 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9064c524-7e0e-4327-ae10-b531d78c450b","Type":"ContainerDied","Data":"f81c4eebc8612d0ad2cf5522672c05fda35477895f396cac8bfab7b11496cab4"} Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.631584 4854 scope.go:117] "RemoveContainer" containerID="e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.631788 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.679608 4854 scope.go:117] "RemoveContainer" containerID="e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.692728 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.715731 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.748232 4854 scope.go:117] "RemoveContainer" containerID="e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a" Nov 25 10:01:24 crc kubenswrapper[4854]: E1125 10:01:24.750257 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a\": container with ID starting with e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a not found: ID does not exist" containerID="e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.750306 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a"} err="failed to get container status \"e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a\": rpc error: code = NotFound desc = could not find container \"e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a\": container with ID starting with e262bc08593155d85adc8bedd60ad33f23acf2d35d857aecac9f001124d77d7a not found: ID does not exist" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.750380 4854 scope.go:117] "RemoveContainer" containerID="e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a" Nov 25 10:01:24 crc kubenswrapper[4854]: E1125 10:01:24.750712 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a\": container with ID starting with e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a not found: ID does not exist" containerID="e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.750768 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a"} err="failed to get container status \"e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a\": rpc error: code = NotFound desc = could not find container \"e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a\": container with ID starting with e22958146f2cdc3537831dcf65e2566e5f17adf8e70f6c5cdc2081b29b44397a not found: ID does not exist" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.785313 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.816120 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:24 crc kubenswrapper[4854]: E1125 10:01:24.816727 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-api" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 
10:01:24.816753 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-api" Nov 25 10:01:24 crc kubenswrapper[4854]: E1125 10:01:24.816781 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-log" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.816790 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-log" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.829308 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-log" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.829347 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" containerName="nova-api-api" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.831359 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.831471 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.834905 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.834945 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.835121 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.890376 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8mvd\" (UniqueName: \"kubernetes.io/projected/c8c89199-292c-4224-9034-550542c29686-kube-api-access-p8mvd\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.890440 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-public-tls-certs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.891134 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.891298 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.891347 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-config-data\") pod \"nova-api-0\" (UID: 
\"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.891538 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c89199-292c-4224-9034-550542c29686-logs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.995779 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.995867 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.996302 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-config-data\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.996497 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c89199-292c-4224-9034-550542c29686-logs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.996601 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8mvd\" (UniqueName: \"kubernetes.io/projected/c8c89199-292c-4224-9034-550542c29686-kube-api-access-p8mvd\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.996723 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-public-tls-certs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.997007 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c89199-292c-4224-9034-550542c29686-logs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:24 crc kubenswrapper[4854]: I1125 10:01:24.999745 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-public-tls-certs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:24.999981 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-config-data\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 
10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.000108 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.002584 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.021300 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8mvd\" (UniqueName: \"kubernetes.io/projected/c8c89199-292c-4224-9034-550542c29686-kube-api-access-p8mvd\") pod \"nova-api-0\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " pod="openstack/nova-api-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.034120 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2784a16c-11ca-4472-bd50-f33dfb6f1086" path="/var/lib/kubelet/pods/2784a16c-11ca-4472-bd50-f33dfb6f1086/volumes" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.035093 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9064c524-7e0e-4327-ae10-b531d78c450b" path="/var/lib/kubelet/pods/9064c524-7e0e-4327-ae10-b531d78c450b/volumes" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.036054 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97e394d1-03c3-467a-84b3-daf51739d393" path="/var/lib/kubelet/pods/97e394d1-03c3-467a-84b3-daf51739d393/volumes" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.311709 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.493189 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.509509 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq4dk\" (UniqueName: \"kubernetes.io/projected/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-kube-api-access-fq4dk\") pod \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.509636 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-config-data\") pod \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.509710 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-combined-ca-bundle\") pod \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\" (UID: \"d2b5b62d-c86d-4e14-a9ff-b41300b16a72\") " Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.539896 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-kube-api-access-fq4dk" (OuterVolumeSpecName: "kube-api-access-fq4dk") pod "d2b5b62d-c86d-4e14-a9ff-b41300b16a72" (UID: "d2b5b62d-c86d-4e14-a9ff-b41300b16a72"). InnerVolumeSpecName "kube-api-access-fq4dk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.593850 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-config-data" (OuterVolumeSpecName: "config-data") pod "d2b5b62d-c86d-4e14-a9ff-b41300b16a72" (UID: "d2b5b62d-c86d-4e14-a9ff-b41300b16a72"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.598808 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d2b5b62d-c86d-4e14-a9ff-b41300b16a72" (UID: "d2b5b62d-c86d-4e14-a9ff-b41300b16a72"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.616412 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq4dk\" (UniqueName: \"kubernetes.io/projected/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-kube-api-access-fq4dk\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.616450 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.616463 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b5b62d-c86d-4e14-a9ff-b41300b16a72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.719848 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a","Type":"ContainerStarted","Data":"8cb20a3321f87f5312b1b5cbe3f253ede960fc83d9d4af7503e35926726a3b69"} Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.720589 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"fe4a1f25-2ac0-4fa9-8abd-934878c41b2a","Type":"ContainerStarted","Data":"b0306fb852e6b4fe65bc95fa31e545141a331a83788c6830e827c6451229578d"} Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.744820 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerStarted","Data":"7c17dc11d805d5a3ec3f2e7e676d83375900a32591c255dfacd5a183c3c250e2"} Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.747287 4854 generic.go:334] "Generic (PLEG): container finished" podID="d2b5b62d-c86d-4e14-a9ff-b41300b16a72" containerID="bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189" exitCode=137 Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.747348 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d2b5b62d-c86d-4e14-a9ff-b41300b16a72","Type":"ContainerDied","Data":"bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189"} Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.747375 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d2b5b62d-c86d-4e14-a9ff-b41300b16a72","Type":"ContainerDied","Data":"ddff51015d6b95d7adcb487153674189a4a636a3007b9499f6e17505b3c06422"} Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.747393 4854 scope.go:117] "RemoveContainer" containerID="bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.747526 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.777965 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"376f9383-a48b-42f0-be32-cf24789335d8","Type":"ContainerStarted","Data":"480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de"} Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.778038 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"376f9383-a48b-42f0-be32-cf24789335d8","Type":"ContainerStarted","Data":"0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09"} Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.802909 4854 scope.go:117] "RemoveContainer" containerID="bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189" Nov 25 10:01:25 crc kubenswrapper[4854]: E1125 10:01:25.811581 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189\": container with ID starting with bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189 not found: ID does not exist" containerID="bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.811654 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189"} err="failed to get container status \"bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189\": rpc error: code = NotFound desc = could not find container \"bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189\": container with ID starting with bdcf7b6dc47d643d3506a3b170aab96f7245504d676472e8d416a761b2dce189 not found: ID does not exist" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.849516 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.849493796 podStartE2EDuration="2.849493796s" podCreationTimestamp="2025-11-25 10:01:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:25.766388746 +0000 UTC m=+1491.619382122" watchObservedRunningTime="2025-11-25 10:01:25.849493796 +0000 UTC m=+1491.702487172" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.858193 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.858165385 podStartE2EDuration="2.858165385s" podCreationTimestamp="2025-11-25 10:01:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:25.823189687 +0000 UTC m=+1491.676183053" watchObservedRunningTime="2025-11-25 10:01:25.858165385 +0000 UTC m=+1491.711158781" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.899223 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.915281 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.927656 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:25 crc kubenswrapper[4854]: E1125 10:01:25.928210 4854 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2b5b62d-c86d-4e14-a9ff-b41300b16a72" containerName="nova-scheduler-scheduler" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.928233 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2b5b62d-c86d-4e14-a9ff-b41300b16a72" containerName="nova-scheduler-scheduler" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.928464 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2b5b62d-c86d-4e14-a9ff-b41300b16a72" containerName="nova-scheduler-scheduler" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.929299 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.933820 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 10:01:25 crc kubenswrapper[4854]: I1125 10:01:25.941299 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.128535 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.135111 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnks6\" (UniqueName: \"kubernetes.io/projected/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-kube-api-access-pnks6\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.135205 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-config-data\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.136259 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.240868 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.241221 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnks6\" (UniqueName: \"kubernetes.io/projected/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-kube-api-access-pnks6\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.241275 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-config-data\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.250033 4854 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-config-data\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.251825 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.261267 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnks6\" (UniqueName: \"kubernetes.io/projected/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-kube-api-access-pnks6\") pod \"nova-scheduler-0\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.558338 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.866815 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c8c89199-292c-4224-9034-550542c29686","Type":"ContainerStarted","Data":"d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a"} Nov 25 10:01:26 crc kubenswrapper[4854]: I1125 10:01:26.867167 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c8c89199-292c-4224-9034-550542c29686","Type":"ContainerStarted","Data":"a4a9a070aa48ce40c1c3dfddfce468ebf3bf4fa3a1e6549c1218f32b57ab984b"} Nov 25 10:01:26 crc kubenswrapper[4854]: E1125 10:01:26.876318 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.028059 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2b5b62d-c86d-4e14-a9ff-b41300b16a72" path="/var/lib/kubelet/pods/d2b5b62d-c86d-4e14-a9ff-b41300b16a72/volumes" Nov 25 10:01:27 crc kubenswrapper[4854]: W1125 10:01:27.107192 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf9fdfc5_3a37_43c1_a1a8_fc02c384e890.slice/crio-f338fcb4d0d656ac07ccbab2d3676fae02e62dbc3ee3a6ea7a0b4b7d0785ae8b WatchSource:0}: Error finding container f338fcb4d0d656ac07ccbab2d3676fae02e62dbc3ee3a6ea7a0b4b7d0785ae8b: Status 404 returned error can't find the container with id f338fcb4d0d656ac07ccbab2d3676fae02e62dbc3ee3a6ea7a0b4b7d0785ae8b Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.109568 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.885349 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerStarted","Data":"6005a32a92f457ecc39461369892e5e4224345975f9bcecc08feae19a9cddf1c"} Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.885594 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="ceilometer-central-agent" containerID="cri-o://1362a87b081619af6dffcf73233e4d02a63af60463d3b7171e8c066acbe1a3d6" gracePeriod=30 Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.885869 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.885949 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="sg-core" containerID="cri-o://7c17dc11d805d5a3ec3f2e7e676d83375900a32591c255dfacd5a183c3c250e2" gracePeriod=30 Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.885967 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="proxy-httpd" containerID="cri-o://6005a32a92f457ecc39461369892e5e4224345975f9bcecc08feae19a9cddf1c" gracePeriod=30 Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.886179 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="ceilometer-notification-agent" containerID="cri-o://6712b217b2dda854e96bac8ff06d92e1386e16dc04b489f571ceb05955decbe7" gracePeriod=30 Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.895130 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"df9fdfc5-3a37-43c1-a1a8-fc02c384e890","Type":"ContainerStarted","Data":"ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c"} Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.895179 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"df9fdfc5-3a37-43c1-a1a8-fc02c384e890","Type":"ContainerStarted","Data":"f338fcb4d0d656ac07ccbab2d3676fae02e62dbc3ee3a6ea7a0b4b7d0785ae8b"} Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.918952 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c8c89199-292c-4224-9034-550542c29686","Type":"ContainerStarted","Data":"de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8"} Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.937243 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.935633823 podStartE2EDuration="6.937223319s" podCreationTimestamp="2025-11-25 10:01:21 +0000 UTC" firstStartedPulling="2025-11-25 10:01:22.803041996 +0000 UTC m=+1488.656035372" lastFinishedPulling="2025-11-25 10:01:26.804631492 +0000 UTC m=+1492.657624868" observedRunningTime="2025-11-25 10:01:27.924191288 +0000 UTC m=+1493.777184684" watchObservedRunningTime="2025-11-25 10:01:27.937223319 +0000 UTC m=+1493.790216695" Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.960332 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.9603089970000003 podStartE2EDuration="3.960308997s" podCreationTimestamp="2025-11-25 10:01:24 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:27.95463701 +0000 UTC m=+1493.807630396" watchObservedRunningTime="2025-11-25 10:01:27.960308997 +0000 UTC m=+1493.813302373" Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.981639 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.981617987 podStartE2EDuration="2.981617987s" podCreationTimestamp="2025-11-25 10:01:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:27.977217515 +0000 UTC m=+1493.830210901" watchObservedRunningTime="2025-11-25 10:01:27.981617987 +0000 UTC m=+1493.834611363" Nov 25 10:01:27 crc kubenswrapper[4854]: I1125 10:01:27.985875 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.080467 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-77kl2"] Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.080952 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" podUID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerName="dnsmasq-dns" containerID="cri-o://3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06" gracePeriod=10 Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.716630 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.908154 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xrsm\" (UniqueName: \"kubernetes.io/projected/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-kube-api-access-2xrsm\") pod \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.909276 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-config\") pod \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.909326 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-sb\") pod \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.909401 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-nb\") pod \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.909809 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-svc\") pod \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.909878 4854 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-swift-storage-0\") pod \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\" (UID: \"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e\") " Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.919916 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-kube-api-access-2xrsm" (OuterVolumeSpecName: "kube-api-access-2xrsm") pod "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" (UID: "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e"). InnerVolumeSpecName "kube-api-access-2xrsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.936198 4854 generic.go:334] "Generic (PLEG): container finished" podID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerID="6005a32a92f457ecc39461369892e5e4224345975f9bcecc08feae19a9cddf1c" exitCode=0 Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.936240 4854 generic.go:334] "Generic (PLEG): container finished" podID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerID="7c17dc11d805d5a3ec3f2e7e676d83375900a32591c255dfacd5a183c3c250e2" exitCode=2 Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.936254 4854 generic.go:334] "Generic (PLEG): container finished" podID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerID="6712b217b2dda854e96bac8ff06d92e1386e16dc04b489f571ceb05955decbe7" exitCode=0 Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.936299 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerDied","Data":"6005a32a92f457ecc39461369892e5e4224345975f9bcecc08feae19a9cddf1c"} Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.936397 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerDied","Data":"7c17dc11d805d5a3ec3f2e7e676d83375900a32591c255dfacd5a183c3c250e2"} Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.936413 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerDied","Data":"6712b217b2dda854e96bac8ff06d92e1386e16dc04b489f571ceb05955decbe7"} Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.937829 4854 generic.go:334] "Generic (PLEG): container finished" podID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerID="3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06" exitCode=0 Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.939906 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.940741 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" event={"ID":"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e","Type":"ContainerDied","Data":"3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06"} Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.940779 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-77kl2" event={"ID":"6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e","Type":"ContainerDied","Data":"ec9456cb159934f6b3334ba6dd272ae3357035210d34cb8bec08f20a9a30bf5c"} Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.940804 4854 scope.go:117] "RemoveContainer" containerID="3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06" Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.990448 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-config" (OuterVolumeSpecName: "config") pod "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" (UID: "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.992592 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" (UID: "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:01:28 crc kubenswrapper[4854]: I1125 10:01:28.999885 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" (UID: "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.006816 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" (UID: "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.012574 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.012600 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.012609 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.012619 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xrsm\" (UniqueName: \"kubernetes.io/projected/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-kube-api-access-2xrsm\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.012631 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.018746 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" (UID: "6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.028643 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.028699 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.093903 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.114171 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.124708 4854 scope.go:117] "RemoveContainer" containerID="920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.164432 4854 scope.go:117] "RemoveContainer" containerID="3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06" Nov 25 10:01:29 crc kubenswrapper[4854]: E1125 10:01:29.165247 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06\": container with ID starting with 3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06 not found: ID does not exist" containerID="3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.165289 4854 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06"} err="failed to get container status \"3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06\": rpc error: code = NotFound desc = could not find container \"3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06\": container with ID starting with 3314a63e0071bc5d7dc57371cc76b1b62ccfff082df6b9ddc8b4a10c9b26be06 not found: ID does not exist" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.165335 4854 scope.go:117] "RemoveContainer" containerID="920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886" Nov 25 10:01:29 crc kubenswrapper[4854]: E1125 10:01:29.165931 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886\": container with ID starting with 920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886 not found: ID does not exist" containerID="920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.165976 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886"} err="failed to get container status \"920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886\": rpc error: code = NotFound desc = could not find container \"920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886\": container with ID starting with 920cbe581a4a32d17e67a15b93fa15f460c7c4807c110ded6bace38167da8886 not found: ID does not exist" Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.280405 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-77kl2"] Nov 25 10:01:29 crc kubenswrapper[4854]: I1125 10:01:29.303097 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-77kl2"] Nov 25 10:01:30 crc kubenswrapper[4854]: I1125 10:01:30.965115 4854 generic.go:334] "Generic (PLEG): container finished" podID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerID="1362a87b081619af6dffcf73233e4d02a63af60463d3b7171e8c066acbe1a3d6" exitCode=0 Nov 25 10:01:30 crc kubenswrapper[4854]: I1125 10:01:30.965183 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerDied","Data":"1362a87b081619af6dffcf73233e4d02a63af60463d3b7171e8c066acbe1a3d6"} Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.033017 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" path="/var/lib/kubelet/pods/6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e/volumes" Nov 25 10:01:31 crc kubenswrapper[4854]: E1125 10:01:31.164816 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda16432da_436b_4d4c_b383_e009ff4a4ff6.slice/crio-fc1ce70cb30329b8b1d10d10efe6bb7b931f1af836e0f1bdb147f97c6bbd3eec\": RecentStats: unable to find data in memory cache]" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.366618 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.377652 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-config-data\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.377756 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-scripts\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.377790 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-combined-ca-bundle\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.377830 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-ceilometer-tls-certs\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.377872 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv898\" (UniqueName: \"kubernetes.io/projected/709d7464-52a2-4fe9-a323-07839ae3d12d-kube-api-access-nv898\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.377942 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-run-httpd\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.377976 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-log-httpd\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.378006 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-sg-core-conf-yaml\") pod \"709d7464-52a2-4fe9-a323-07839ae3d12d\" (UID: \"709d7464-52a2-4fe9-a323-07839ae3d12d\") " Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.378499 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.378592 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.383583 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/709d7464-52a2-4fe9-a323-07839ae3d12d-kube-api-access-nv898" (OuterVolumeSpecName: "kube-api-access-nv898") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "kube-api-access-nv898". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.384341 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-scripts" (OuterVolumeSpecName: "scripts") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.417868 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.477456 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.481404 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.481436 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.481445 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.481454 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv898\" (UniqueName: \"kubernetes.io/projected/709d7464-52a2-4fe9-a323-07839ae3d12d-kube-api-access-nv898\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.481467 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.481476 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/709d7464-52a2-4fe9-a323-07839ae3d12d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.489457 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.507943 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-config-data" (OuterVolumeSpecName: "config-data") pod "709d7464-52a2-4fe9-a323-07839ae3d12d" (UID: "709d7464-52a2-4fe9-a323-07839ae3d12d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.559285 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.584212 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.584243 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/709d7464-52a2-4fe9-a323-07839ae3d12d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.990027 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"709d7464-52a2-4fe9-a323-07839ae3d12d","Type":"ContainerDied","Data":"3f171cfad85b0e9666f17d073b403bb6a2ae9c3b31ef91394141ad7e7b9d9676"} Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.990442 4854 scope.go:117] "RemoveContainer" containerID="6005a32a92f457ecc39461369892e5e4224345975f9bcecc08feae19a9cddf1c" Nov 25 10:01:31 crc kubenswrapper[4854]: I1125 10:01:31.990642 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.042081 4854 scope.go:117] "RemoveContainer" containerID="7c17dc11d805d5a3ec3f2e7e676d83375900a32591c255dfacd5a183c3c250e2" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.052966 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.076657 4854 scope.go:117] "RemoveContainer" containerID="6712b217b2dda854e96bac8ff06d92e1386e16dc04b489f571ceb05955decbe7" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.079335 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.094358 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:32 crc kubenswrapper[4854]: E1125 10:01:32.094886 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerName="dnsmasq-dns" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.094904 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerName="dnsmasq-dns" Nov 25 10:01:32 crc kubenswrapper[4854]: E1125 10:01:32.094921 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerName="init" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.094928 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerName="init" Nov 25 10:01:32 crc kubenswrapper[4854]: E1125 10:01:32.094951 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="sg-core" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.094958 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="sg-core" Nov 25 10:01:32 crc kubenswrapper[4854]: E1125 10:01:32.094977 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" 
containerName="ceilometer-central-agent" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.094983 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="ceilometer-central-agent" Nov 25 10:01:32 crc kubenswrapper[4854]: E1125 10:01:32.094996 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="proxy-httpd" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.095002 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="proxy-httpd" Nov 25 10:01:32 crc kubenswrapper[4854]: E1125 10:01:32.095018 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="ceilometer-notification-agent" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.095025 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="ceilometer-notification-agent" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.095244 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="ceilometer-notification-agent" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.095260 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c4507e5-033b-4f2f-8af7-2f08c5cf7f5e" containerName="dnsmasq-dns" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.095268 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="ceilometer-central-agent" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.095281 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="sg-core" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.095296 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" containerName="proxy-httpd" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.097422 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.100887 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.101019 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.101093 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.106464 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.116645 4854 scope.go:117] "RemoveContainer" containerID="1362a87b081619af6dffcf73233e4d02a63af60463d3b7171e8c066acbe1a3d6" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208269 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-config-data\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208384 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-run-httpd\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208440 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-scripts\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208482 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-log-httpd\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208508 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208570 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208594 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.208740 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfb8m\" (UniqueName: \"kubernetes.io/projected/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-kube-api-access-xfb8m\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310073 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-config-data\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310188 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-run-httpd\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310264 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-scripts\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310310 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-log-httpd\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310336 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310403 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310423 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.310492 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfb8m\" (UniqueName: \"kubernetes.io/projected/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-kube-api-access-xfb8m\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.311235 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-log-httpd\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 
10:01:32.311345 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-run-httpd\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.316368 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.316501 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-scripts\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.317663 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.323368 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.328134 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfb8m\" (UniqueName: \"kubernetes.io/projected/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-kube-api-access-xfb8m\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.336108 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-config-data\") pod \"ceilometer-0\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.414449 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:01:32 crc kubenswrapper[4854]: I1125 10:01:32.982807 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:01:33 crc kubenswrapper[4854]: I1125 10:01:33.009531 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerStarted","Data":"c5fd2efbce63049501009ac64db22d9cfd04dd865afd0efaff354dde42e03897"} Nov 25 10:01:33 crc kubenswrapper[4854]: I1125 10:01:33.028299 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="709d7464-52a2-4fe9-a323-07839ae3d12d" path="/var/lib/kubelet/pods/709d7464-52a2-4fe9-a323-07839ae3d12d/volumes" Nov 25 10:01:34 crc kubenswrapper[4854]: I1125 10:01:34.021962 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 10:01:34 crc kubenswrapper[4854]: I1125 10:01:34.022354 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 10:01:34 crc kubenswrapper[4854]: I1125 10:01:34.094369 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:34 crc kubenswrapper[4854]: I1125 10:01:34.115521 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.033822 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.1:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.033826 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.1:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.045934 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerStarted","Data":"2afc36587dadc064b7c6674060754426dfb229a6e026f335c26f27fe2996e9e2"} Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.046153 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerStarted","Data":"18960e030db434f11285c8b29c4c459dd6e7da813d874967292f260e5804bb93"} Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.063171 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.237117 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-nl4hd"] Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.238953 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.246872 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.246907 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.254521 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nl4hd"] Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.313777 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.313849 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.396313 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-config-data\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.396453 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-scripts\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.396508 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.396906 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9xgh\" (UniqueName: \"kubernetes.io/projected/e27e9250-032d-44da-8819-ce560d4f9c3f-kube-api-access-v9xgh\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.499045 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-config-data\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.499175 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-scripts\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.499211 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.499271 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9xgh\" (UniqueName: \"kubernetes.io/projected/e27e9250-032d-44da-8819-ce560d4f9c3f-kube-api-access-v9xgh\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.506316 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.515177 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-scripts\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.516628 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-config-data\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.521788 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9xgh\" (UniqueName: \"kubernetes.io/projected/e27e9250-032d-44da-8819-ce560d4f9c3f-kube-api-access-v9xgh\") pod \"nova-cell1-cell-mapping-nl4hd\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") " pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:35 crc kubenswrapper[4854]: I1125 10:01:35.569952 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nl4hd" Nov 25 10:01:36 crc kubenswrapper[4854]: I1125 10:01:36.060079 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerStarted","Data":"36e3cefde07716dbaa8af808a30b0614a66ec14c198be98dae5c922c69998bdc"} Nov 25 10:01:36 crc kubenswrapper[4854]: I1125 10:01:36.235145 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nl4hd"] Nov 25 10:01:36 crc kubenswrapper[4854]: I1125 10:01:36.327876 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.3:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:01:36 crc kubenswrapper[4854]: I1125 10:01:36.327880 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.3:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:01:36 crc kubenswrapper[4854]: I1125 10:01:36.559742 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 10:01:36 crc kubenswrapper[4854]: I1125 10:01:36.601994 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 10:01:37 crc kubenswrapper[4854]: I1125 10:01:37.073623 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nl4hd" event={"ID":"e27e9250-032d-44da-8819-ce560d4f9c3f","Type":"ContainerStarted","Data":"fef0d09ac9cddbbf9f4f7cb64f43adf59990ebece51f482f846184bdc7f7e414"} Nov 25 10:01:37 crc kubenswrapper[4854]: I1125 10:01:37.074230 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nl4hd" event={"ID":"e27e9250-032d-44da-8819-ce560d4f9c3f","Type":"ContainerStarted","Data":"cb4a40a73e9118fc621c05abd050f81b0f4220e21cfa7517d3fcb294bbb8a1af"} Nov 25 10:01:37 crc kubenswrapper[4854]: I1125 10:01:37.102727 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-nl4hd" podStartSLOduration=2.102701479 podStartE2EDuration="2.102701479s" podCreationTimestamp="2025-11-25 10:01:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:37.089290638 +0000 UTC m=+1502.942284024" watchObservedRunningTime="2025-11-25 10:01:37.102701479 +0000 UTC m=+1502.955694855" Nov 25 10:01:37 crc kubenswrapper[4854]: I1125 10:01:37.124475 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 10:01:38 crc kubenswrapper[4854]: I1125 10:01:38.090902 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerStarted","Data":"ee3561d06464e30cc212bf33139332d9545746700109d217afa39e7206b50e2c"} Nov 25 10:01:39 crc kubenswrapper[4854]: I1125 10:01:39.101137 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:01:39 crc kubenswrapper[4854]: I1125 10:01:39.136007 4854 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.493721014 podStartE2EDuration="7.135979666s" podCreationTimestamp="2025-11-25 10:01:32 +0000 UTC" firstStartedPulling="2025-11-25 10:01:32.992637942 +0000 UTC m=+1498.845631318" lastFinishedPulling="2025-11-25 10:01:37.634896604 +0000 UTC m=+1503.487889970" observedRunningTime="2025-11-25 10:01:39.123187592 +0000 UTC m=+1504.976180978" watchObservedRunningTime="2025-11-25 10:01:39.135979666 +0000 UTC m=+1504.988973042" Nov 25 10:01:41 crc kubenswrapper[4854]: E1125 10:01:41.647568 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9b28076_56b0_47d7_a0b5_f82956ea494a.slice/crio-conmon-532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f.scope\": RecentStats: unable to find data in memory cache]" Nov 25 10:01:41 crc kubenswrapper[4854]: I1125 10:01:41.867448 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:01:41 crc kubenswrapper[4854]: I1125 10:01:41.973848 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-scripts\") pod \"e9b28076-56b0-47d7-a0b5-f82956ea494a\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " Nov 25 10:01:41 crc kubenswrapper[4854]: I1125 10:01:41.973894 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfnmg\" (UniqueName: \"kubernetes.io/projected/e9b28076-56b0-47d7-a0b5-f82956ea494a-kube-api-access-qfnmg\") pod \"e9b28076-56b0-47d7-a0b5-f82956ea494a\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " Nov 25 10:01:41 crc kubenswrapper[4854]: I1125 10:01:41.973975 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-config-data\") pod \"e9b28076-56b0-47d7-a0b5-f82956ea494a\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " Nov 25 10:01:41 crc kubenswrapper[4854]: I1125 10:01:41.974123 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-combined-ca-bundle\") pod \"e9b28076-56b0-47d7-a0b5-f82956ea494a\" (UID: \"e9b28076-56b0-47d7-a0b5-f82956ea494a\") " Nov 25 10:01:41 crc kubenswrapper[4854]: I1125 10:01:41.987399 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9b28076-56b0-47d7-a0b5-f82956ea494a-kube-api-access-qfnmg" (OuterVolumeSpecName: "kube-api-access-qfnmg") pod "e9b28076-56b0-47d7-a0b5-f82956ea494a" (UID: "e9b28076-56b0-47d7-a0b5-f82956ea494a"). InnerVolumeSpecName "kube-api-access-qfnmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:41 crc kubenswrapper[4854]: I1125 10:01:41.997039 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-scripts" (OuterVolumeSpecName: "scripts") pod "e9b28076-56b0-47d7-a0b5-f82956ea494a" (UID: "e9b28076-56b0-47d7-a0b5-f82956ea494a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.077344 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.077375 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfnmg\" (UniqueName: \"kubernetes.io/projected/e9b28076-56b0-47d7-a0b5-f82956ea494a-kube-api-access-qfnmg\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.107538 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9b28076-56b0-47d7-a0b5-f82956ea494a" (UID: "e9b28076-56b0-47d7-a0b5-f82956ea494a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.120895 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-config-data" (OuterVolumeSpecName: "config-data") pod "e9b28076-56b0-47d7-a0b5-f82956ea494a" (UID: "e9b28076-56b0-47d7-a0b5-f82956ea494a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.133173 4854 generic.go:334] "Generic (PLEG): container finished" podID="e27e9250-032d-44da-8819-ce560d4f9c3f" containerID="fef0d09ac9cddbbf9f4f7cb64f43adf59990ebece51f482f846184bdc7f7e414" exitCode=0 Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.133333 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nl4hd" event={"ID":"e27e9250-032d-44da-8819-ce560d4f9c3f","Type":"ContainerDied","Data":"fef0d09ac9cddbbf9f4f7cb64f43adf59990ebece51f482f846184bdc7f7e414"} Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.135851 4854 generic.go:334] "Generic (PLEG): container finished" podID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerID="532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f" exitCode=137 Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.136004 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerDied","Data":"532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f"} Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.136034 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e9b28076-56b0-47d7-a0b5-f82956ea494a","Type":"ContainerDied","Data":"1ab9cadd9c09a3a6ec7138425aa7ed22e7bb15d7aa6ca60e9457c2b811ebc998"} Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.136051 4854 scope.go:117] "RemoveContainer" containerID="532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.136158 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.161908 4854 scope.go:117] "RemoveContainer" containerID="7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.182467 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.182792 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b28076-56b0-47d7-a0b5-f82956ea494a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.189647 4854 scope.go:117] "RemoveContainer" containerID="faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.190540 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.223281 4854 scope.go:117] "RemoveContainer" containerID="61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.247715 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.249104 4854 scope.go:117] "RemoveContainer" containerID="532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f" Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.250657 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f\": container with ID starting with 532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f not found: ID does not exist" containerID="532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.250856 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f"} err="failed to get container status \"532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f\": rpc error: code = NotFound desc = could not find container \"532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f\": container with ID starting with 532e8e0a4021f2fe09a70c15e567d2dce217368fdd813a7fa6a538670e24324f not found: ID does not exist" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.250883 4854 scope.go:117] "RemoveContainer" containerID="7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6" Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.251148 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6\": container with ID starting with 7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6 not found: ID does not exist" containerID="7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.251173 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6"} err="failed to get container status 
\"7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6\": rpc error: code = NotFound desc = could not find container \"7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6\": container with ID starting with 7da4eab9ec0ee3a10ad4e4fb28dbb1690c4aef49e5205cd9f264cd2d8389f7f6 not found: ID does not exist" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.251186 4854 scope.go:117] "RemoveContainer" containerID="faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a" Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.251405 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a\": container with ID starting with faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a not found: ID does not exist" containerID="faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.251439 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a"} err="failed to get container status \"faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a\": rpc error: code = NotFound desc = could not find container \"faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a\": container with ID starting with faa2552e1b2ce50a4cf818dd04e46468871f8cdf07be841a6e153bb1ba69320a not found: ID does not exist" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.251452 4854 scope.go:117] "RemoveContainer" containerID="61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01" Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.251790 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01\": container with ID starting with 61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01 not found: ID does not exist" containerID="61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.251832 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01"} err="failed to get container status \"61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01\": rpc error: code = NotFound desc = could not find container \"61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01\": container with ID starting with 61abe6eb764a470729cef52909a645b5a60402274b0a87ae475601835768bf01 not found: ID does not exist" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.264780 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.265414 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-notifier" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.265433 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-notifier" Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.265457 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-listener" Nov 25 10:01:42 crc 
kubenswrapper[4854]: I1125 10:01:42.265464 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-listener" Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.265540 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-api" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.265549 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-api" Nov 25 10:01:42 crc kubenswrapper[4854]: E1125 10:01:42.265560 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-evaluator" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.265567 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-evaluator" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.267781 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-evaluator" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.267845 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-notifier" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.267904 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-listener" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.267924 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" containerName="aodh-api" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.272087 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.274252 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.274563 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-jzshp" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.274810 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.274920 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.275567 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.278753 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.393289 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-internal-tls-certs\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.393437 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-combined-ca-bundle\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.393472 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-scripts\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.393559 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-public-tls-certs\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.393610 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-config-data\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.393663 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hf5fj\" (UniqueName: \"kubernetes.io/projected/d92bb92b-df2f-4cf2-88c0-fe50081f16de-kube-api-access-hf5fj\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0" Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.494982 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-internal-tls-certs\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0" 
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.495066 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-combined-ca-bundle\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.495088 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-scripts\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.495131 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-public-tls-certs\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.495174 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-config-data\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.495206 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hf5fj\" (UniqueName: \"kubernetes.io/projected/d92bb92b-df2f-4cf2-88c0-fe50081f16de-kube-api-access-hf5fj\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.498971 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-scripts\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.499549 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-internal-tls-certs\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.499556 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-combined-ca-bundle\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.500558 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-public-tls-certs\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.502866 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-config-data\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.516084 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hf5fj\" (UniqueName: \"kubernetes.io/projected/d92bb92b-df2f-4cf2-88c0-fe50081f16de-kube-api-access-hf5fj\") pod \"aodh-0\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " pod="openstack/aodh-0"
Nov 25 10:01:42 crc kubenswrapper[4854]: I1125 10:01:42.593864 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.029151 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9b28076-56b0-47d7-a0b5-f82956ea494a" path="/var/lib/kubelet/pods/e9b28076-56b0-47d7-a0b5-f82956ea494a/volumes"
Nov 25 10:01:43 crc kubenswrapper[4854]: W1125 10:01:43.064580 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd92bb92b_df2f_4cf2_88c0_fe50081f16de.slice/crio-45dddcce40d9345b5807de49ff2b5c9e869805cc2115b4890f38ffe44da32626 WatchSource:0}: Error finding container 45dddcce40d9345b5807de49ff2b5c9e869805cc2115b4890f38ffe44da32626: Status 404 returned error can't find the container with id 45dddcce40d9345b5807de49ff2b5c9e869805cc2115b4890f38ffe44da32626
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.070983 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.163022 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerStarted","Data":"45dddcce40d9345b5807de49ff2b5c9e869805cc2115b4890f38ffe44da32626"}
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.783872 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nl4hd"
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.825628 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9xgh\" (UniqueName: \"kubernetes.io/projected/e27e9250-032d-44da-8819-ce560d4f9c3f-kube-api-access-v9xgh\") pod \"e27e9250-032d-44da-8819-ce560d4f9c3f\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") "
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.825795 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-combined-ca-bundle\") pod \"e27e9250-032d-44da-8819-ce560d4f9c3f\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") "
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.825893 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-config-data\") pod \"e27e9250-032d-44da-8819-ce560d4f9c3f\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") "
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.825980 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-scripts\") pod \"e27e9250-032d-44da-8819-ce560d4f9c3f\" (UID: \"e27e9250-032d-44da-8819-ce560d4f9c3f\") "
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.843524 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-scripts" (OuterVolumeSpecName: "scripts") pod "e27e9250-032d-44da-8819-ce560d4f9c3f" (UID: "e27e9250-032d-44da-8819-ce560d4f9c3f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.843807 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e27e9250-032d-44da-8819-ce560d4f9c3f-kube-api-access-v9xgh" (OuterVolumeSpecName: "kube-api-access-v9xgh") pod "e27e9250-032d-44da-8819-ce560d4f9c3f" (UID: "e27e9250-032d-44da-8819-ce560d4f9c3f"). InnerVolumeSpecName "kube-api-access-v9xgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.860510 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e27e9250-032d-44da-8819-ce560d4f9c3f" (UID: "e27e9250-032d-44da-8819-ce560d4f9c3f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.875265 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-config-data" (OuterVolumeSpecName: "config-data") pod "e27e9250-032d-44da-8819-ce560d4f9c3f" (UID: "e27e9250-032d-44da-8819-ce560d4f9c3f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.927688 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.927953 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.928066 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9xgh\" (UniqueName: \"kubernetes.io/projected/e27e9250-032d-44da-8819-ce560d4f9c3f-kube-api-access-v9xgh\") on node \"crc\" DevicePath \"\""
Nov 25 10:01:43 crc kubenswrapper[4854]: I1125 10:01:43.928145 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27e9250-032d-44da-8819-ce560d4f9c3f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.027514 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.028074 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.034960 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.035975 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.192708 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerStarted","Data":"0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968"}
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.195131 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nl4hd"
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.195136 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nl4hd" event={"ID":"e27e9250-032d-44da-8819-ce560d4f9c3f","Type":"ContainerDied","Data":"cb4a40a73e9118fc621c05abd050f81b0f4220e21cfa7517d3fcb294bbb8a1af"}
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.195195 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb4a40a73e9118fc621c05abd050f81b0f4220e21cfa7517d3fcb294bbb8a1af"
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.240609 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.241134 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-api" containerID="cri-o://de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8" gracePeriod=30
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.241152 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-log" containerID="cri-o://d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a" gracePeriod=30
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.258524 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.259380 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="df9fdfc5-3a37-43c1-a1a8-fc02c384e890" containerName="nova-scheduler-scheduler" containerID="cri-o://ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" gracePeriod=30
Nov 25 10:01:44 crc kubenswrapper[4854]: I1125 10:01:44.281794 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:01:45 crc kubenswrapper[4854]: I1125 10:01:45.214643 4854 generic.go:334] "Generic (PLEG): container finished" podID="c8c89199-292c-4224-9034-550542c29686" containerID="d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a" exitCode=143
Nov 25 10:01:45 crc kubenswrapper[4854]: I1125 10:01:45.214882 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c8c89199-292c-4224-9034-550542c29686","Type":"ContainerDied","Data":"d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a"}
Nov 25 10:01:45 crc kubenswrapper[4854]: I1125 10:01:45.218433 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerStarted","Data":"1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b"}
Nov 25 10:01:45 crc kubenswrapper[4854]: I1125 10:01:45.218705 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-log" containerID="cri-o://0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09" gracePeriod=30
Nov 25 10:01:45 crc kubenswrapper[4854]: I1125 10:01:45.218783 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-metadata"
containerID="cri-o://480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de" gracePeriod=30 Nov 25 10:01:46 crc kubenswrapper[4854]: I1125 10:01:46.233294 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerStarted","Data":"0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e"} Nov 25 10:01:46 crc kubenswrapper[4854]: I1125 10:01:46.236031 4854 generic.go:334] "Generic (PLEG): container finished" podID="376f9383-a48b-42f0-be32-cf24789335d8" containerID="0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09" exitCode=143 Nov 25 10:01:46 crc kubenswrapper[4854]: I1125 10:01:46.236062 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"376f9383-a48b-42f0-be32-cf24789335d8","Type":"ContainerDied","Data":"0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09"} Nov 25 10:01:46 crc kubenswrapper[4854]: E1125 10:01:46.560830 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 10:01:46 crc kubenswrapper[4854]: E1125 10:01:46.562195 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 10:01:46 crc kubenswrapper[4854]: E1125 10:01:46.563406 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 10:01:46 crc kubenswrapper[4854]: E1125 10:01:46.563446 4854 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="df9fdfc5-3a37-43c1-a1a8-fc02c384e890" containerName="nova-scheduler-scheduler" Nov 25 10:01:47 crc kubenswrapper[4854]: I1125 10:01:47.251453 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerStarted","Data":"c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea"} Nov 25 10:01:47 crc kubenswrapper[4854]: I1125 10:01:47.278813 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.056032484 podStartE2EDuration="5.278794381s" podCreationTimestamp="2025-11-25 10:01:42 +0000 UTC" firstStartedPulling="2025-11-25 10:01:43.067170614 +0000 UTC m=+1508.920163990" lastFinishedPulling="2025-11-25 10:01:46.289932511 +0000 UTC m=+1512.142925887" observedRunningTime="2025-11-25 10:01:47.27729246 +0000 UTC m=+1513.130285846" watchObservedRunningTime="2025-11-25 10:01:47.278794381 +0000 UTC m=+1513.131787767" Nov 25 10:01:47 crc kubenswrapper[4854]: I1125 10:01:47.900485 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.038535 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-internal-tls-certs\") pod \"c8c89199-292c-4224-9034-550542c29686\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.038711 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-combined-ca-bundle\") pod \"c8c89199-292c-4224-9034-550542c29686\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.038834 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-public-tls-certs\") pod \"c8c89199-292c-4224-9034-550542c29686\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.039126 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8mvd\" (UniqueName: \"kubernetes.io/projected/c8c89199-292c-4224-9034-550542c29686-kube-api-access-p8mvd\") pod \"c8c89199-292c-4224-9034-550542c29686\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.039187 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-config-data\") pod \"c8c89199-292c-4224-9034-550542c29686\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.039258 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c89199-292c-4224-9034-550542c29686-logs\") pod \"c8c89199-292c-4224-9034-550542c29686\" (UID: \"c8c89199-292c-4224-9034-550542c29686\") " Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.039625 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8c89199-292c-4224-9034-550542c29686-logs" (OuterVolumeSpecName: "logs") pod "c8c89199-292c-4224-9034-550542c29686" (UID: "c8c89199-292c-4224-9034-550542c29686"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.040270 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c89199-292c-4224-9034-550542c29686-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.046650 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8c89199-292c-4224-9034-550542c29686-kube-api-access-p8mvd" (OuterVolumeSpecName: "kube-api-access-p8mvd") pod "c8c89199-292c-4224-9034-550542c29686" (UID: "c8c89199-292c-4224-9034-550542c29686"). InnerVolumeSpecName "kube-api-access-p8mvd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.079990 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8c89199-292c-4224-9034-550542c29686" (UID: "c8c89199-292c-4224-9034-550542c29686"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.081528 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-config-data" (OuterVolumeSpecName: "config-data") pod "c8c89199-292c-4224-9034-550542c29686" (UID: "c8c89199-292c-4224-9034-550542c29686"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.115815 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c8c89199-292c-4224-9034-550542c29686" (UID: "c8c89199-292c-4224-9034-550542c29686"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.123375 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c8c89199-292c-4224-9034-550542c29686" (UID: "c8c89199-292c-4224-9034-550542c29686"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.142965 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8mvd\" (UniqueName: \"kubernetes.io/projected/c8c89199-292c-4224-9034-550542c29686-kube-api-access-p8mvd\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.143011 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.143032 4854 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.143055 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.143074 4854 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8c89199-292c-4224-9034-550542c29686-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.274061 4854 generic.go:334] "Generic (PLEG): container finished" podID="c8c89199-292c-4224-9034-550542c29686" containerID="de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8" exitCode=0 Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.280522 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"c8c89199-292c-4224-9034-550542c29686","Type":"ContainerDied","Data":"de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8"} Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.280614 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c8c89199-292c-4224-9034-550542c29686","Type":"ContainerDied","Data":"a4a9a070aa48ce40c1c3dfddfce468ebf3bf4fa3a1e6549c1218f32b57ab984b"} Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.280640 4854 scope.go:117] "RemoveContainer" containerID="de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.281105 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.317997 4854 scope.go:117] "RemoveContainer" containerID="d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.340462 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.351962 4854 scope.go:117] "RemoveContainer" containerID="de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8" Nov 25 10:01:48 crc kubenswrapper[4854]: E1125 10:01:48.352525 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8\": container with ID starting with de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8 not found: ID does not exist" containerID="de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.352553 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8"} err="failed to get container status \"de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8\": rpc error: code = NotFound desc = could not find container \"de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8\": container with ID starting with de1de2c4cdfb82c523ce89f276c5cedbd84ca8ce8d8945a18651f06dd17ae6f8 not found: ID does not exist" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.352574 4854 scope.go:117] "RemoveContainer" containerID="d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a" Nov 25 10:01:48 crc kubenswrapper[4854]: E1125 10:01:48.352962 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a\": container with ID starting with d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a not found: ID does not exist" containerID="d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.352996 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a"} err="failed to get container status \"d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a\": rpc error: code = NotFound desc = could not find container \"d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a\": container with ID starting with 
d91a6d1bb5fed2567d0a6f325fa09bb4a3a5b6141e3d95db4230477e1e0e7b6a not found: ID does not exist" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.361778 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.384619 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:48 crc kubenswrapper[4854]: E1125 10:01:48.385078 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-log" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.385092 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-log" Nov 25 10:01:48 crc kubenswrapper[4854]: E1125 10:01:48.385120 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-api" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.385125 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-api" Nov 25 10:01:48 crc kubenswrapper[4854]: E1125 10:01:48.385143 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e27e9250-032d-44da-8819-ce560d4f9c3f" containerName="nova-manage" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.385149 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e27e9250-032d-44da-8819-ce560d4f9c3f" containerName="nova-manage" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.385561 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-api" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.385581 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e27e9250-032d-44da-8819-ce560d4f9c3f" containerName="nova-manage" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.385596 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8c89199-292c-4224-9034-550542c29686" containerName="nova-api-log" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.386775 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.386843 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.414127 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.414173 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.414400 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.553013 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-public-tls-certs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.553150 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-config-data\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.553212 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d276cec0-76c5-44c3-91a5-669c20aaba25-logs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.553282 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.553384 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.553439 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqd2q\" (UniqueName: \"kubernetes.io/projected/d276cec0-76c5-44c3-91a5-669c20aaba25-kube-api-access-nqd2q\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.655356 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-public-tls-certs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.655712 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-config-data\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.655762 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d276cec0-76c5-44c3-91a5-669c20aaba25-logs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.655813 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.655886 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.656211 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d276cec0-76c5-44c3-91a5-669c20aaba25-logs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.655937 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqd2q\" (UniqueName: \"kubernetes.io/projected/d276cec0-76c5-44c3-91a5-669c20aaba25-kube-api-access-nqd2q\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.668016 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.679164 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.679730 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-public-tls-certs\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.682225 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d276cec0-76c5-44c3-91a5-669c20aaba25-config-data\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.687243 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqd2q\" (UniqueName: \"kubernetes.io/projected/d276cec0-76c5-44c3-91a5-669c20aaba25-kube-api-access-nqd2q\") pod \"nova-api-0\" (UID: \"d276cec0-76c5-44c3-91a5-669c20aaba25\") " pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.841551 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:01:48 crc kubenswrapper[4854]: I1125 10:01:48.906793 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.032747 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8c89199-292c-4224-9034-550542c29686" path="/var/lib/kubelet/pods/c8c89199-292c-4224-9034-550542c29686/volumes" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.066807 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-combined-ca-bundle\") pod \"376f9383-a48b-42f0-be32-cf24789335d8\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.066909 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjsrm\" (UniqueName: \"kubernetes.io/projected/376f9383-a48b-42f0-be32-cf24789335d8-kube-api-access-hjsrm\") pod \"376f9383-a48b-42f0-be32-cf24789335d8\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.066970 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/376f9383-a48b-42f0-be32-cf24789335d8-logs\") pod \"376f9383-a48b-42f0-be32-cf24789335d8\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.067157 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-nova-metadata-tls-certs\") pod \"376f9383-a48b-42f0-be32-cf24789335d8\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.067282 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-config-data\") pod \"376f9383-a48b-42f0-be32-cf24789335d8\" (UID: \"376f9383-a48b-42f0-be32-cf24789335d8\") " Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.067881 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/376f9383-a48b-42f0-be32-cf24789335d8-logs" (OuterVolumeSpecName: "logs") pod "376f9383-a48b-42f0-be32-cf24789335d8" (UID: "376f9383-a48b-42f0-be32-cf24789335d8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.071709 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/376f9383-a48b-42f0-be32-cf24789335d8-kube-api-access-hjsrm" (OuterVolumeSpecName: "kube-api-access-hjsrm") pod "376f9383-a48b-42f0-be32-cf24789335d8" (UID: "376f9383-a48b-42f0-be32-cf24789335d8"). InnerVolumeSpecName "kube-api-access-hjsrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.108895 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "376f9383-a48b-42f0-be32-cf24789335d8" (UID: "376f9383-a48b-42f0-be32-cf24789335d8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.109814 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-config-data" (OuterVolumeSpecName: "config-data") pod "376f9383-a48b-42f0-be32-cf24789335d8" (UID: "376f9383-a48b-42f0-be32-cf24789335d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.133698 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "376f9383-a48b-42f0-be32-cf24789335d8" (UID: "376f9383-a48b-42f0-be32-cf24789335d8"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.169948 4854 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.169988 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.169998 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/376f9383-a48b-42f0-be32-cf24789335d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.170007 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjsrm\" (UniqueName: \"kubernetes.io/projected/376f9383-a48b-42f0-be32-cf24789335d8-kube-api-access-hjsrm\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.170015 4854 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/376f9383-a48b-42f0-be32-cf24789335d8-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.292604 4854 generic.go:334] "Generic (PLEG): container finished" podID="376f9383-a48b-42f0-be32-cf24789335d8" containerID="480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de" exitCode=0 Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.292655 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"376f9383-a48b-42f0-be32-cf24789335d8","Type":"ContainerDied","Data":"480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de"} Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.292766 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"376f9383-a48b-42f0-be32-cf24789335d8","Type":"ContainerDied","Data":"ea8a247c7b143769a034b223c9d1ad56a8d0b1527b01a2e1ae0915d030820e94"} Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.292792 4854 scope.go:117] "RemoveContainer" containerID="480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.292683 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.335593 4854 scope.go:117] "RemoveContainer" containerID="0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.362821 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.366712 4854 scope.go:117] "RemoveContainer" containerID="480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de" Nov 25 10:01:49 crc kubenswrapper[4854]: E1125 10:01:49.367924 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de\": container with ID starting with 480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de not found: ID does not exist" containerID="480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.367971 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de"} err="failed to get container status \"480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de\": rpc error: code = NotFound desc = could not find container \"480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de\": container with ID starting with 480399c4b75a4784fef4845fcc1e0dade244e04c1b38c64bc2c1094eac40e9de not found: ID does not exist" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.367999 4854 scope.go:117] "RemoveContainer" containerID="0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09" Nov 25 10:01:49 crc kubenswrapper[4854]: E1125 10:01:49.370036 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09\": container with ID starting with 0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09 not found: ID does not exist" containerID="0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.370065 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09"} err="failed to get container status \"0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09\": rpc error: code = NotFound desc = could not find container \"0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09\": container with ID starting with 0741b8580147ee1631d60908d95a4ce5e3d66089d43365d638738bcae2834e09 not found: ID does not exist" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.403542 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.426827 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:49 crc kubenswrapper[4854]: E1125 10:01:49.427846 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-log" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.427863 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="376f9383-a48b-42f0-be32-cf24789335d8" 
containerName="nova-metadata-log" Nov 25 10:01:49 crc kubenswrapper[4854]: E1125 10:01:49.427882 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-metadata" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.427888 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-metadata" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.428166 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-metadata" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.428184 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="376f9383-a48b-42f0-be32-cf24789335d8" containerName="nova-metadata-log" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.429516 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.432649 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.433055 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.443645 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.459929 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.583625 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.583986 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.584121 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzbvs\" (UniqueName: \"kubernetes.io/projected/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-kube-api-access-lzbvs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.584176 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-config-data\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.584231 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-logs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") 
" pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.686229 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzbvs\" (UniqueName: \"kubernetes.io/projected/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-kube-api-access-lzbvs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.686304 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-config-data\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.686358 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-logs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.686400 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.686434 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.687750 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-logs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.692501 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.692947 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.694838 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-config-data\") pod \"nova-metadata-0\" (UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.708147 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzbvs\" (UniqueName: \"kubernetes.io/projected/abaf3b4c-e24e-4387-950e-b0b50ed7d0e5-kube-api-access-lzbvs\") pod \"nova-metadata-0\" 
(UID: \"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5\") " pod="openstack/nova-metadata-0" Nov 25 10:01:49 crc kubenswrapper[4854]: I1125 10:01:49.756838 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.034560 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.200022 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-combined-ca-bundle\") pod \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.200216 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnks6\" (UniqueName: \"kubernetes.io/projected/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-kube-api-access-pnks6\") pod \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.200309 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-config-data\") pod \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\" (UID: \"df9fdfc5-3a37-43c1-a1a8-fc02c384e890\") " Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.205321 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-kube-api-access-pnks6" (OuterVolumeSpecName: "kube-api-access-pnks6") pod "df9fdfc5-3a37-43c1-a1a8-fc02c384e890" (UID: "df9fdfc5-3a37-43c1-a1a8-fc02c384e890"). InnerVolumeSpecName "kube-api-access-pnks6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.234901 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "df9fdfc5-3a37-43c1-a1a8-fc02c384e890" (UID: "df9fdfc5-3a37-43c1-a1a8-fc02c384e890"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.245812 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-config-data" (OuterVolumeSpecName: "config-data") pod "df9fdfc5-3a37-43c1-a1a8-fc02c384e890" (UID: "df9fdfc5-3a37-43c1-a1a8-fc02c384e890"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.303685 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnks6\" (UniqueName: \"kubernetes.io/projected/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-kube-api-access-pnks6\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.303714 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.303724 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df9fdfc5-3a37-43c1-a1a8-fc02c384e890-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.308658 4854 generic.go:334] "Generic (PLEG): container finished" podID="df9fdfc5-3a37-43c1-a1a8-fc02c384e890" containerID="ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" exitCode=0 Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.308763 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.308772 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"df9fdfc5-3a37-43c1-a1a8-fc02c384e890","Type":"ContainerDied","Data":"ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c"} Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.309889 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"df9fdfc5-3a37-43c1-a1a8-fc02c384e890","Type":"ContainerDied","Data":"f338fcb4d0d656ac07ccbab2d3676fae02e62dbc3ee3a6ea7a0b4b7d0785ae8b"} Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.309924 4854 scope.go:117] "RemoveContainer" containerID="ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.313425 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d276cec0-76c5-44c3-91a5-669c20aaba25","Type":"ContainerStarted","Data":"aea18254496e41b3436b73d74d1eca37edbf94a9863555c95686147bcc5bc22a"} Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.313486 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d276cec0-76c5-44c3-91a5-669c20aaba25","Type":"ContainerStarted","Data":"08db023663bc16e196e3f84d991c22200f542d63ea02b5fb55de5acad6d851ba"} Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.313501 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d276cec0-76c5-44c3-91a5-669c20aaba25","Type":"ContainerStarted","Data":"cab04101132e9e54f61ae950ddcc78fbd2329903d80aebe9225db9b5756cdcf1"} Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.337832 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.337790287 podStartE2EDuration="2.337790287s" podCreationTimestamp="2025-11-25 10:01:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:50.329303412 +0000 UTC m=+1516.182296798" watchObservedRunningTime="2025-11-25 10:01:50.337790287 +0000 UTC m=+1516.190783673" Nov 25 10:01:50 crc 
kubenswrapper[4854]: I1125 10:01:50.367846 4854 scope.go:117] "RemoveContainer" containerID="ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" Nov 25 10:01:50 crc kubenswrapper[4854]: E1125 10:01:50.368326 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c\": container with ID starting with ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c not found: ID does not exist" containerID="ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.368365 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c"} err="failed to get container status \"ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c\": rpc error: code = NotFound desc = could not find container \"ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c\": container with ID starting with ddeffabc2059c9522eeee201abf6b72481deae653c553f7977927a6b1977925c not found: ID does not exist" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.369749 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.429430 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:50 crc kubenswrapper[4854]: W1125 10:01:50.437501 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podabaf3b4c_e24e_4387_950e_b0b50ed7d0e5.slice/crio-ef30571f71ab284545aaf9a5179209d1ae52a341d456a05faca74e4c0d15d6ff WatchSource:0}: Error finding container ef30571f71ab284545aaf9a5179209d1ae52a341d456a05faca74e4c0d15d6ff: Status 404 returned error can't find the container with id ef30571f71ab284545aaf9a5179209d1ae52a341d456a05faca74e4c0d15d6ff Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.445816 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:50 crc kubenswrapper[4854]: E1125 10:01:50.446384 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df9fdfc5-3a37-43c1-a1a8-fc02c384e890" containerName="nova-scheduler-scheduler" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.446411 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="df9fdfc5-3a37-43c1-a1a8-fc02c384e890" containerName="nova-scheduler-scheduler" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.447019 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="df9fdfc5-3a37-43c1-a1a8-fc02c384e890" containerName="nova-scheduler-scheduler" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.447874 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.456150 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.464283 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.476681 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.622933 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c760277-34b8-4444-9c0f-c5a7b572f4ed-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.623052 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c760277-34b8-4444-9c0f-c5a7b572f4ed-config-data\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.623079 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt4vx\" (UniqueName: \"kubernetes.io/projected/3c760277-34b8-4444-9c0f-c5a7b572f4ed-kube-api-access-kt4vx\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.724699 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c760277-34b8-4444-9c0f-c5a7b572f4ed-config-data\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.724759 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt4vx\" (UniqueName: \"kubernetes.io/projected/3c760277-34b8-4444-9c0f-c5a7b572f4ed-kube-api-access-kt4vx\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.724902 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c760277-34b8-4444-9c0f-c5a7b572f4ed-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.729054 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c760277-34b8-4444-9c0f-c5a7b572f4ed-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.730637 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c760277-34b8-4444-9c0f-c5a7b572f4ed-config-data\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: 
I1125 10:01:50.749822 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt4vx\" (UniqueName: \"kubernetes.io/projected/3c760277-34b8-4444-9c0f-c5a7b572f4ed-kube-api-access-kt4vx\") pod \"nova-scheduler-0\" (UID: \"3c760277-34b8-4444-9c0f-c5a7b572f4ed\") " pod="openstack/nova-scheduler-0" Nov 25 10:01:50 crc kubenswrapper[4854]: I1125 10:01:50.880514 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:01:51 crc kubenswrapper[4854]: I1125 10:01:51.026733 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="376f9383-a48b-42f0-be32-cf24789335d8" path="/var/lib/kubelet/pods/376f9383-a48b-42f0-be32-cf24789335d8/volumes" Nov 25 10:01:51 crc kubenswrapper[4854]: I1125 10:01:51.027994 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df9fdfc5-3a37-43c1-a1a8-fc02c384e890" path="/var/lib/kubelet/pods/df9fdfc5-3a37-43c1-a1a8-fc02c384e890/volumes" Nov 25 10:01:51 crc kubenswrapper[4854]: I1125 10:01:51.338481 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5","Type":"ContainerStarted","Data":"9ebfcb993926b9ee5e55b1af0f17cf4190284ba5939ab23f6ae357851f4cdaf9"} Nov 25 10:01:51 crc kubenswrapper[4854]: I1125 10:01:51.338931 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5","Type":"ContainerStarted","Data":"59f75ec1febef3d7f251d27738b6ca94bd1e8d04e1cacc9394bfb528a6454244"} Nov 25 10:01:51 crc kubenswrapper[4854]: I1125 10:01:51.338965 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"abaf3b4c-e24e-4387-950e-b0b50ed7d0e5","Type":"ContainerStarted","Data":"ef30571f71ab284545aaf9a5179209d1ae52a341d456a05faca74e4c0d15d6ff"} Nov 25 10:01:51 crc kubenswrapper[4854]: W1125 10:01:51.350364 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c760277_34b8_4444_9c0f_c5a7b572f4ed.slice/crio-eec19b0ac452c88d19babff2a178063671a7b795298ff43b465ca8a5bbf01c4e WatchSource:0}: Error finding container eec19b0ac452c88d19babff2a178063671a7b795298ff43b465ca8a5bbf01c4e: Status 404 returned error can't find the container with id eec19b0ac452c88d19babff2a178063671a7b795298ff43b465ca8a5bbf01c4e Nov 25 10:01:51 crc kubenswrapper[4854]: I1125 10:01:51.360435 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:01:51 crc kubenswrapper[4854]: I1125 10:01:51.366612 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.366584842 podStartE2EDuration="2.366584842s" podCreationTimestamp="2025-11-25 10:01:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:51.360828622 +0000 UTC m=+1517.213821998" watchObservedRunningTime="2025-11-25 10:01:51.366584842 +0000 UTC m=+1517.219578238" Nov 25 10:01:52 crc kubenswrapper[4854]: I1125 10:01:52.353879 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3c760277-34b8-4444-9c0f-c5a7b572f4ed","Type":"ContainerStarted","Data":"836eeb12dd904eebff38100c5c9ff6aca5b8aa8dfae6bc6c5e2646ae477f6260"} Nov 25 10:01:52 crc kubenswrapper[4854]: I1125 10:01:52.354296 4854 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3c760277-34b8-4444-9c0f-c5a7b572f4ed","Type":"ContainerStarted","Data":"eec19b0ac452c88d19babff2a178063671a7b795298ff43b465ca8a5bbf01c4e"} Nov 25 10:01:52 crc kubenswrapper[4854]: I1125 10:01:52.385453 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.385425871 podStartE2EDuration="2.385425871s" podCreationTimestamp="2025-11-25 10:01:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:52.372424582 +0000 UTC m=+1518.225417958" watchObservedRunningTime="2025-11-25 10:01:52.385425871 +0000 UTC m=+1518.238419277" Nov 25 10:01:54 crc kubenswrapper[4854]: I1125 10:01:54.757592 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:01:54 crc kubenswrapper[4854]: I1125 10:01:54.758175 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:01:55 crc kubenswrapper[4854]: I1125 10:01:55.028478 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:01:55 crc kubenswrapper[4854]: I1125 10:01:55.028535 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:01:55 crc kubenswrapper[4854]: I1125 10:01:55.880620 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 10:01:58 crc kubenswrapper[4854]: I1125 10:01:58.842632 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:01:58 crc kubenswrapper[4854]: I1125 10:01:58.843223 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:01:59 crc kubenswrapper[4854]: I1125 10:01:59.757689 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 10:01:59 crc kubenswrapper[4854]: I1125 10:01:59.757772 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 10:01:59 crc kubenswrapper[4854]: I1125 10:01:59.860887 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d276cec0-76c5-44c3-91a5-669c20aaba25" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:01:59 crc kubenswrapper[4854]: I1125 10:01:59.860926 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d276cec0-76c5-44c3-91a5-669c20aaba25" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:02:00 crc kubenswrapper[4854]: I1125 10:02:00.773960 4854 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-metadata-0" podUID="abaf3b4c-e24e-4387-950e-b0b50ed7d0e5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.9:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:02:00 crc kubenswrapper[4854]: I1125 10:02:00.774282 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="abaf3b4c-e24e-4387-950e-b0b50ed7d0e5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.9:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:02:00 crc kubenswrapper[4854]: I1125 10:02:00.880805 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 10:02:00 crc kubenswrapper[4854]: I1125 10:02:00.946916 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 10:02:01 crc kubenswrapper[4854]: I1125 10:02:01.521910 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 10:02:02 crc kubenswrapper[4854]: I1125 10:02:02.441922 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 10:02:08 crc kubenswrapper[4854]: I1125 10:02:08.850210 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 10:02:08 crc kubenswrapper[4854]: I1125 10:02:08.850979 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:02:08 crc kubenswrapper[4854]: I1125 10:02:08.855917 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 10:02:08 crc kubenswrapper[4854]: I1125 10:02:08.863303 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 10:02:09 crc kubenswrapper[4854]: I1125 10:02:09.585070 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:02:09 crc kubenswrapper[4854]: I1125 10:02:09.591017 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 10:02:09 crc kubenswrapper[4854]: I1125 10:02:09.767692 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 10:02:09 crc kubenswrapper[4854]: I1125 10:02:09.770127 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 10:02:09 crc kubenswrapper[4854]: I1125 10:02:09.788631 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 10:02:10 crc kubenswrapper[4854]: I1125 10:02:10.605932 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.507377 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-pfkjn"] Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.524179 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-pfkjn"] Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.605057 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-hcn8p"] Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.606805 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.617041 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-hcn8p"] Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.750785 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-combined-ca-bundle\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.751000 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg9db\" (UniqueName: \"kubernetes.io/projected/7909b58c-9614-4859-b5c9-be2fd2c77fc8-kube-api-access-xg9db\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.752234 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.855863 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.856041 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-combined-ca-bundle\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.856102 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg9db\" (UniqueName: \"kubernetes.io/projected/7909b58c-9614-4859-b5c9-be2fd2c77fc8-kube-api-access-xg9db\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.862602 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.864232 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-combined-ca-bundle\") pod \"heat-db-sync-hcn8p\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.877953 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg9db\" (UniqueName: \"kubernetes.io/projected/7909b58c-9614-4859-b5c9-be2fd2c77fc8-kube-api-access-xg9db\") pod \"heat-db-sync-hcn8p\" (UID: 
\"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:20 crc kubenswrapper[4854]: I1125 10:02:20.937491 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:21 crc kubenswrapper[4854]: I1125 10:02:21.033040 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec8d5beb-439a-4921-a6b8-029331402149" path="/var/lib/kubelet/pods/ec8d5beb-439a-4921-a6b8-029331402149/volumes" Nov 25 10:02:21 crc kubenswrapper[4854]: I1125 10:02:21.485050 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-hcn8p"] Nov 25 10:02:21 crc kubenswrapper[4854]: I1125 10:02:21.492345 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:02:21 crc kubenswrapper[4854]: I1125 10:02:21.759952 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-hcn8p" event={"ID":"7909b58c-9614-4859-b5c9-be2fd2c77fc8","Type":"ContainerStarted","Data":"15a424c324269ae502de1397a46ca1cf371d3f4b28cb2c55bcb5e9df6403ca33"} Nov 25 10:02:22 crc kubenswrapper[4854]: I1125 10:02:22.556322 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.291939 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.293086 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="proxy-httpd" containerID="cri-o://ee3561d06464e30cc212bf33139332d9545746700109d217afa39e7206b50e2c" gracePeriod=30 Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.293107 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-notification-agent" containerID="cri-o://2afc36587dadc064b7c6674060754426dfb229a6e026f335c26f27fe2996e9e2" gracePeriod=30 Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.292961 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-central-agent" containerID="cri-o://18960e030db434f11285c8b29c4c459dd6e7da813d874967292f260e5804bb93" gracePeriod=30 Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.293343 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="sg-core" containerID="cri-o://36e3cefde07716dbaa8af808a30b0614a66ec14c198be98dae5c922c69998bdc" gracePeriod=30 Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.662954 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.799422 4854 generic.go:334] "Generic (PLEG): container finished" podID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerID="ee3561d06464e30cc212bf33139332d9545746700109d217afa39e7206b50e2c" exitCode=0 Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.799455 4854 generic.go:334] "Generic (PLEG): container finished" podID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerID="36e3cefde07716dbaa8af808a30b0614a66ec14c198be98dae5c922c69998bdc" exitCode=2 Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 
10:02:23.799475 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerDied","Data":"ee3561d06464e30cc212bf33139332d9545746700109d217afa39e7206b50e2c"} Nov 25 10:02:23 crc kubenswrapper[4854]: I1125 10:02:23.799499 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerDied","Data":"36e3cefde07716dbaa8af808a30b0614a66ec14c198be98dae5c922c69998bdc"} Nov 25 10:02:24 crc kubenswrapper[4854]: I1125 10:02:24.824219 4854 generic.go:334] "Generic (PLEG): container finished" podID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerID="18960e030db434f11285c8b29c4c459dd6e7da813d874967292f260e5804bb93" exitCode=0 Nov 25 10:02:24 crc kubenswrapper[4854]: I1125 10:02:24.824264 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerDied","Data":"18960e030db434f11285c8b29c4c459dd6e7da813d874967292f260e5804bb93"} Nov 25 10:02:25 crc kubenswrapper[4854]: I1125 10:02:25.028461 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:02:25 crc kubenswrapper[4854]: I1125 10:02:25.028503 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:02:27 crc kubenswrapper[4854]: I1125 10:02:27.274289 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-2" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="rabbitmq" containerID="cri-o://8d6f89d7d6fbc896c63253f9ef919d0be6fde4db080cb205d97fa50cdbb02239" gracePeriod=604796 Nov 25 10:02:28 crc kubenswrapper[4854]: I1125 10:02:28.162519 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" containerName="rabbitmq" containerID="cri-o://41e467847c880fe581389b94e5ceda22ae9ee9e03a9dbd9ef358e10e52b6dca3" gracePeriod=604796 Nov 25 10:02:28 crc kubenswrapper[4854]: I1125 10:02:28.872874 4854 generic.go:334] "Generic (PLEG): container finished" podID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerID="2afc36587dadc064b7c6674060754426dfb229a6e026f335c26f27fe2996e9e2" exitCode=0 Nov 25 10:02:28 crc kubenswrapper[4854]: I1125 10:02:28.872940 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerDied","Data":"2afc36587dadc064b7c6674060754426dfb229a6e026f335c26f27fe2996e9e2"} Nov 25 10:02:33 crc kubenswrapper[4854]: I1125 10:02:33.936477 4854 generic.go:334] "Generic (PLEG): container finished" podID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerID="8d6f89d7d6fbc896c63253f9ef919d0be6fde4db080cb205d97fa50cdbb02239" exitCode=0 Nov 25 10:02:33 crc kubenswrapper[4854]: I1125 10:02:33.936566 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" 
event={"ID":"dc586641-37b8-4b9b-8479-a3c552bec71d","Type":"ContainerDied","Data":"8d6f89d7d6fbc896c63253f9ef919d0be6fde4db080cb205d97fa50cdbb02239"} Nov 25 10:02:34 crc kubenswrapper[4854]: I1125 10:02:34.866130 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:02:34 crc kubenswrapper[4854]: I1125 10:02:34.952750 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0da5a5f-c737-4ad7-a34a-fbad8d68866f","Type":"ContainerDied","Data":"c5fd2efbce63049501009ac64db22d9cfd04dd865afd0efaff354dde42e03897"} Nov 25 10:02:34 crc kubenswrapper[4854]: I1125 10:02:34.952811 4854 scope.go:117] "RemoveContainer" containerID="ee3561d06464e30cc212bf33139332d9545746700109d217afa39e7206b50e2c" Nov 25 10:02:34 crc kubenswrapper[4854]: I1125 10:02:34.952992 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:02:34 crc kubenswrapper[4854]: I1125 10:02:34.957736 4854 generic.go:334] "Generic (PLEG): container finished" podID="575fe5df-5a76-4633-9688-3997a708f3f4" containerID="41e467847c880fe581389b94e5ceda22ae9ee9e03a9dbd9ef358e10e52b6dca3" exitCode=0 Nov 25 10:02:34 crc kubenswrapper[4854]: I1125 10:02:34.957768 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"575fe5df-5a76-4633-9688-3997a708f3f4","Type":"ContainerDied","Data":"41e467847c880fe581389b94e5ceda22ae9ee9e03a9dbd9ef358e10e52b6dca3"} Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.007935 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-config-data\") pod \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.007994 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-log-httpd\") pod \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008137 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-run-httpd\") pod \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008188 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-ceilometer-tls-certs\") pod \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008267 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-scripts\") pod \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008320 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-combined-ca-bundle\") pod 
\"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008367 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-sg-core-conf-yaml\") pod \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008496 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfb8m\" (UniqueName: \"kubernetes.io/projected/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-kube-api-access-xfb8m\") pod \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\" (UID: \"a0da5a5f-c737-4ad7-a34a-fbad8d68866f\") " Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008538 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.008638 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.009240 4854 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.009259 4854 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.027286 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-kube-api-access-xfb8m" (OuterVolumeSpecName: "kube-api-access-xfb8m") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "kube-api-access-xfb8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.033478 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-scripts" (OuterVolumeSpecName: "scripts") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.062610 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.111929 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfb8m\" (UniqueName: \"kubernetes.io/projected/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-kube-api-access-xfb8m\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.111975 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.111988 4854 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.124717 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.200794 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.214168 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.214195 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.265423 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-config-data" (OuterVolumeSpecName: "config-data") pod "a0da5a5f-c737-4ad7-a34a-fbad8d68866f" (UID: "a0da5a5f-c737-4ad7-a34a-fbad8d68866f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.316377 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0da5a5f-c737-4ad7-a34a-fbad8d68866f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.633630 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.647202 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.662646 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:02:35 crc kubenswrapper[4854]: E1125 10:02:35.667967 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-notification-agent" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668012 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-notification-agent" Nov 25 10:02:35 crc kubenswrapper[4854]: E1125 10:02:35.668046 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-central-agent" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668055 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-central-agent" Nov 25 10:02:35 crc kubenswrapper[4854]: E1125 10:02:35.668105 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="proxy-httpd" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668113 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="proxy-httpd" Nov 25 10:02:35 crc kubenswrapper[4854]: E1125 10:02:35.668134 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="sg-core" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668143 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="sg-core" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668576 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="sg-core" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668596 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="proxy-httpd" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668609 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-notification-agent" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.668620 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="ceilometer-central-agent" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.670892 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.678580 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.678853 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.678993 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.688531 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.830483 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.830533 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng96t\" (UniqueName: \"kubernetes.io/projected/e73606dc-c7c0-4d1e-9f87-5effe3a03611-kube-api-access-ng96t\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.830592 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.830701 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.830785 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e73606dc-c7c0-4d1e-9f87-5effe3a03611-log-httpd\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.830948 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-config-data\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.830980 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e73606dc-c7c0-4d1e-9f87-5effe3a03611-run-httpd\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.831052 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-scripts\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.932809 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-scripts\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.932915 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.932944 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng96t\" (UniqueName: \"kubernetes.io/projected/e73606dc-c7c0-4d1e-9f87-5effe3a03611-kube-api-access-ng96t\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.932999 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.933077 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.933147 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e73606dc-c7c0-4d1e-9f87-5effe3a03611-log-httpd\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.933911 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e73606dc-c7c0-4d1e-9f87-5effe3a03611-log-httpd\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.934131 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-config-data\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.934166 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e73606dc-c7c0-4d1e-9f87-5effe3a03611-run-httpd\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.934525 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/e73606dc-c7c0-4d1e-9f87-5effe3a03611-run-httpd\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.938065 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-scripts\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.939134 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-config-data\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.939228 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.941416 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.947381 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e73606dc-c7c0-4d1e-9f87-5effe3a03611-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:35 crc kubenswrapper[4854]: I1125 10:02:35.950479 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng96t\" (UniqueName: \"kubernetes.io/projected/e73606dc-c7c0-4d1e-9f87-5effe3a03611-kube-api-access-ng96t\") pod \"ceilometer-0\" (UID: \"e73606dc-c7c0-4d1e-9f87-5effe3a03611\") " pod="openstack/ceilometer-0" Nov 25 10:02:36 crc kubenswrapper[4854]: I1125 10:02:36.006952 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:02:37 crc kubenswrapper[4854]: I1125 10:02:37.035115 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" path="/var/lib/kubelet/pods/a0da5a5f-c737-4ad7-a34a-fbad8d68866f/volumes" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.595932 4854 scope.go:117] "RemoveContainer" containerID="36e3cefde07716dbaa8af808a30b0614a66ec14c198be98dae5c922c69998bdc" Nov 25 10:02:39 crc kubenswrapper[4854]: E1125 10:02:39.670480 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 25 10:02:39 crc kubenswrapper[4854]: E1125 10:02:39.670544 4854 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 25 10:02:39 crc kubenswrapper[4854]: E1125 10:02:39.670714 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xg9db,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-hcn8p_openstack(7909b58c-9614-4859-b5c9-be2fd2c77fc8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:02:39 crc kubenswrapper[4854]: E1125 10:02:39.671952 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with 
ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-hcn8p" podUID="7909b58c-9614-4859-b5c9-be2fd2c77fc8" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.741770 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.754351 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.802368 4854 scope.go:117] "RemoveContainer" containerID="2afc36587dadc064b7c6674060754426dfb229a6e026f335c26f27fe2996e9e2" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.842744 4854 scope.go:117] "RemoveContainer" containerID="18960e030db434f11285c8b29c4c459dd6e7da813d874967292f260e5804bb93" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.848691 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: i/o timeout" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926438 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dc586641-37b8-4b9b-8479-a3c552bec71d-erlang-cookie-secret\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926584 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fx929\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-kube-api-access-fx929\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926645 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-server-conf\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926759 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-tls\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926804 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/575fe5df-5a76-4633-9688-3997a708f3f4-pod-info\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926841 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-config-data\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926866 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926908 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-plugins-conf\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.926980 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927069 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/575fe5df-5a76-4633-9688-3997a708f3f4-erlang-cookie-secret\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927102 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-confd\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927163 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-erlang-cookie\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927201 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-server-conf\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927231 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-plugins-conf\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927254 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927280 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mgt5\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-kube-api-access-5mgt5\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927307 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/dc586641-37b8-4b9b-8479-a3c552bec71d-pod-info\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927335 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-plugins\") pod \"dc586641-37b8-4b9b-8479-a3c552bec71d\" (UID: \"dc586641-37b8-4b9b-8479-a3c552bec71d\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927388 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-confd\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927444 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-config-data\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927470 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-erlang-cookie\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.927491 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-plugins\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.940600 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:02:39 crc kubenswrapper[4854]: I1125 10:02:39.963691 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.013722 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "local-storage07-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.027570 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc586641-37b8-4b9b-8479-a3c552bec71d-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.027706 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.029780 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/dc586641-37b8-4b9b-8479-a3c552bec71d-pod-info" (OuterVolumeSpecName: "pod-info") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.031184 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.031589 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls\") pod \"575fe5df-5a76-4633-9688-3997a708f3f4\" (UID: \"575fe5df-5a76-4633-9688-3997a708f3f4\") " Nov 25 10:02:40 crc kubenswrapper[4854]: W1125 10:02:40.031826 4854 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/575fe5df-5a76-4633-9688-3997a708f3f4/volumes/kubernetes.io~projected/rabbitmq-tls Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.031907 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032057 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032558 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-kube-api-access-5mgt5" (OuterVolumeSpecName: "kube-api-access-5mgt5") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "kube-api-access-5mgt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032784 4854 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032816 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032827 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mgt5\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-kube-api-access-5mgt5\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032838 4854 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dc586641-37b8-4b9b-8479-a3c552bec71d-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032847 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032859 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032871 4854 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dc586641-37b8-4b9b-8479-a3c552bec71d-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032882 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.032899 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.033969 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.037789 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.038475 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.038498 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"dc586641-37b8-4b9b-8479-a3c552bec71d","Type":"ContainerDied","Data":"2c5f4b5c111a680106b37dde0493ba1d7eb69ffc35b6bdb885b1ccd993890d4b"} Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.038543 4854 scope.go:117] "RemoveContainer" containerID="8d6f89d7d6fbc896c63253f9ef919d0be6fde4db080cb205d97fa50cdbb02239" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.040825 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/575fe5df-5a76-4633-9688-3997a708f3f4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.041829 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.049076 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.053353 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-kube-api-access-fx929" (OuterVolumeSpecName: "kube-api-access-fx929") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "kube-api-access-fx929". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.053447 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/575fe5df-5a76-4633-9688-3997a708f3f4-pod-info" (OuterVolumeSpecName: "pod-info") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.058511 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"575fe5df-5a76-4633-9688-3997a708f3f4","Type":"ContainerDied","Data":"4b4e527f168e77fa66753edca299b2df7f7dbcee7e5edb7d180baa2e35bb87be"} Nov 25 10:02:40 crc kubenswrapper[4854]: E1125 10:02:40.060472 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-hcn8p" podUID="7909b58c-9614-4859-b5c9-be2fd2c77fc8" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.061176 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.122360 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-server-conf" (OuterVolumeSpecName: "server-conf") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136436 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136788 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fx929\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-kube-api-access-fx929\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136803 4854 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136813 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136826 4854 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/575fe5df-5a76-4633-9688-3997a708f3f4-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136836 4854 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136847 4854 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/575fe5df-5a76-4633-9688-3997a708f3f4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.136859 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 
10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.143223 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-server-conf" (OuterVolumeSpecName: "server-conf") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.214417 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-config-data" (OuterVolumeSpecName: "config-data") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.222894 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-config-data" (OuterVolumeSpecName: "config-data") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.223097 4854 scope.go:117] "RemoveContainer" containerID="1d56aa5fdd0201276ecfee6387fdafa67cd3f4cd93571921d3582c84f66d1f16" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.223473 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.132:5671: i/o timeout" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.232863 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.257629 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dc586641-37b8-4b9b-8479-a3c552bec71d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.257661 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.257703 4854 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.257713 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/575fe5df-5a76-4633-9688-3997a708f3f4-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.261854 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.309112 4854 scope.go:117] "RemoveContainer" containerID="41e467847c880fe581389b94e5ceda22ae9ee9e03a9dbd9ef358e10e52b6dca3" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.324613 4854 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/ceilometer-0"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.331461 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "575fe5df-5a76-4633-9688-3997a708f3f4" (UID: "575fe5df-5a76-4633-9688-3997a708f3f4"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.358416 4854 scope.go:117] "RemoveContainer" containerID="a3871b7ff59f7e655f575c3f64e51e999b3cdec859edb86a4be2741b96d9f09f" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.360305 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.360337 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/575fe5df-5a76-4633-9688-3997a708f3f4-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.360957 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-znnrh"] Nov 25 10:02:40 crc kubenswrapper[4854]: E1125 10:02:40.361533 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" containerName="rabbitmq" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.361554 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" containerName="rabbitmq" Nov 25 10:02:40 crc kubenswrapper[4854]: E1125 10:02:40.361602 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" containerName="setup-container" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.361609 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" containerName="setup-container" Nov 25 10:02:40 crc kubenswrapper[4854]: E1125 10:02:40.361633 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="rabbitmq" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.361639 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="rabbitmq" Nov 25 10:02:40 crc kubenswrapper[4854]: E1125 10:02:40.361685 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="setup-container" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.361691 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="setup-container" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.362020 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" containerName="rabbitmq" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.362054 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" containerName="rabbitmq" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.363295 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.365326 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.366853 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "dc586641-37b8-4b9b-8479-a3c552bec71d" (UID: "dc586641-37b8-4b9b-8479-a3c552bec71d"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.382750 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-znnrh"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.430779 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.459089 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.462198 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7wld\" (UniqueName: \"kubernetes.io/projected/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-kube-api-access-v7wld\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.462290 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.462339 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-config\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.462411 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.462721 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.462904 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: 
\"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.463035 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.463202 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dc586641-37b8-4b9b-8479-a3c552bec71d-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.475801 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.483832 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.486704 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.489167 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-4l8k2" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.489167 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.489334 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.489424 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.489588 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.492155 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.492217 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.565554 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.565863 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.565898 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-config\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: 
\"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.565942 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.565958 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.565974 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l27jp\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-kube-api-access-l27jp\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566010 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566060 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566144 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566185 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566248 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566293 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566333 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566356 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566410 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7wld\" (UniqueName: \"kubernetes.io/projected/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-kube-api-access-v7wld\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566433 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566484 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.566507 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.567433 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.567548 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-config\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.567960 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: 
\"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.568198 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.569370 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.574985 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.589894 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7wld\" (UniqueName: \"kubernetes.io/projected/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-kube-api-access-v7wld\") pod \"dnsmasq-dns-5b75489c6f-znnrh\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.668959 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.668996 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l27jp\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-kube-api-access-l27jp\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.669020 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.669060 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.669130 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc 
kubenswrapper[4854]: I1125 10:02:40.669187 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.671924 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.672471 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.672607 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.672718 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.672730 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.672741 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.672782 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.673599 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.673787 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.674351 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.674587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.675099 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.683349 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.683968 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.685603 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.687722 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.694073 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l27jp\" (UniqueName: \"kubernetes.io/projected/bb562532-b4b2-42ad-9d8e-a9b230a3bcf5-kube-api-access-l27jp\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.773587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.812665 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.901655 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.924876 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-2"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.944649 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"] Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.947496 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Nov 25 10:02:40 crc kubenswrapper[4854]: I1125 10:02:40.958486 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.067403 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="575fe5df-5a76-4633-9688-3997a708f3f4" path="/var/lib/kubelet/pods/575fe5df-5a76-4633-9688-3997a708f3f4/volumes" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083239 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fdceb63d-e366-47f2-954d-29730788adbb-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083298 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083368 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-config-data\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083476 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083513 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083544 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-server-conf\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083627 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-bgjdw\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-kube-api-access-bgjdw\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083695 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdceb63d-e366-47f2-954d-29730788adbb-pod-info\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083771 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083811 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.083847 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.091718 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc586641-37b8-4b9b-8479-a3c552bec71d" path="/var/lib/kubelet/pods/dc586641-37b8-4b9b-8479-a3c552bec71d/volumes" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.146871 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e73606dc-c7c0-4d1e-9f87-5effe3a03611","Type":"ContainerStarted","Data":"9a199045d615853549b325b1d5552f2f2223135e0efe583a2391a8fa4e436379"} Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.187012 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-server-conf\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.187562 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgjdw\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-kube-api-access-bgjdw\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.187654 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdceb63d-e366-47f2-954d-29730788adbb-pod-info\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.187837 4854 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.187897 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.187946 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.188096 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fdceb63d-e366-47f2-954d-29730788adbb-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.188139 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.188213 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-config-data\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.188393 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.188444 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.191247 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-server-conf\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.193401 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-config-data\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 
10:02:41.193399 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.210892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.213052 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.213711 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdceb63d-e366-47f2-954d-29730788adbb-pod-info\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.214250 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdceb63d-e366-47f2-954d-29730788adbb-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.231417 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fdceb63d-e366-47f2-954d-29730788adbb-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.232368 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.235307 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgjdw\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-kube-api-access-bgjdw\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.247579 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdceb63d-e366-47f2-954d-29730788adbb-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.284303 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-2\" (UID: \"fdceb63d-e366-47f2-954d-29730788adbb\") " 
pod="openstack/rabbitmq-server-2" Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.340979 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-znnrh"] Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.577236 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:02:41 crc kubenswrapper[4854]: I1125 10:02:41.582830 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Nov 25 10:02:42 crc kubenswrapper[4854]: W1125 10:02:42.159944 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfdceb63d_e366_47f2_954d_29730788adbb.slice/crio-583960855ededcf4862bf393078d13a4d39245e7ddfe3c37d5ca2d3e99e573ca WatchSource:0}: Error finding container 583960855ededcf4862bf393078d13a4d39245e7ddfe3c37d5ca2d3e99e573ca: Status 404 returned error can't find the container with id 583960855ededcf4862bf393078d13a4d39245e7ddfe3c37d5ca2d3e99e573ca Nov 25 10:02:42 crc kubenswrapper[4854]: I1125 10:02:42.170789 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Nov 25 10:02:42 crc kubenswrapper[4854]: I1125 10:02:42.179510 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5","Type":"ContainerStarted","Data":"5e5d61793fe371be5f64880195d777798127176aad593617c7681b431cec787e"} Nov 25 10:02:42 crc kubenswrapper[4854]: I1125 10:02:42.182092 4854 generic.go:334] "Generic (PLEG): container finished" podID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerID="c878571d9e579dadfba58ec19ce280da658c4088b918e1be674661f1643e9295" exitCode=0 Nov 25 10:02:42 crc kubenswrapper[4854]: I1125 10:02:42.182148 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" event={"ID":"f8fac777-00f5-4d15-94d6-c9e0c93ca38e","Type":"ContainerDied","Data":"c878571d9e579dadfba58ec19ce280da658c4088b918e1be674661f1643e9295"} Nov 25 10:02:42 crc kubenswrapper[4854]: I1125 10:02:42.182173 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" event={"ID":"f8fac777-00f5-4d15-94d6-c9e0c93ca38e","Type":"ContainerStarted","Data":"80701b5437b2bcd3798cc66ca0619fc9ef5fe976435b4342e6348c7c7c4cd2f0"} Nov 25 10:02:43 crc kubenswrapper[4854]: I1125 10:02:43.198408 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" event={"ID":"f8fac777-00f5-4d15-94d6-c9e0c93ca38e","Type":"ContainerStarted","Data":"20413ca977773f26ecf76393a65f48af507728a83d9868543151a813c3739550"} Nov 25 10:02:43 crc kubenswrapper[4854]: I1125 10:02:43.199830 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:43 crc kubenswrapper[4854]: I1125 10:02:43.202923 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"fdceb63d-e366-47f2-954d-29730788adbb","Type":"ContainerStarted","Data":"583960855ededcf4862bf393078d13a4d39245e7ddfe3c37d5ca2d3e99e573ca"} Nov 25 10:02:43 crc kubenswrapper[4854]: I1125 10:02:43.228456 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" podStartSLOduration=3.228438507 podStartE2EDuration="3.228438507s" podCreationTimestamp="2025-11-25 10:02:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:02:43.220029274 +0000 UTC m=+1569.073022670" watchObservedRunningTime="2025-11-25 10:02:43.228438507 +0000 UTC m=+1569.081431873" Nov 25 10:02:45 crc kubenswrapper[4854]: I1125 10:02:45.234734 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"fdceb63d-e366-47f2-954d-29730788adbb","Type":"ContainerStarted","Data":"57ca1baa8bef0efad3056f83d970f90ef8e6c7a71b6a9ca8b6aea8c3b958d44b"} Nov 25 10:02:45 crc kubenswrapper[4854]: I1125 10:02:45.238039 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5","Type":"ContainerStarted","Data":"a5a5abb0d1685956efb0a36a9f42ccc65101244e88934194a9db13ad768b7250"} Nov 25 10:02:46 crc kubenswrapper[4854]: I1125 10:02:46.255337 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e73606dc-c7c0-4d1e-9f87-5effe3a03611","Type":"ContainerStarted","Data":"ce669fcc85b9b168d62dbba8bc0595be0a0f23702eeb4565e90163c05bf51575"} Nov 25 10:02:47 crc kubenswrapper[4854]: I1125 10:02:47.271445 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e73606dc-c7c0-4d1e-9f87-5effe3a03611","Type":"ContainerStarted","Data":"50ea92f9e65598c8b38caa4b00de505c96787061c191c1514e5acd39d6dddf9d"} Nov 25 10:02:48 crc kubenswrapper[4854]: I1125 10:02:48.284120 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e73606dc-c7c0-4d1e-9f87-5effe3a03611","Type":"ContainerStarted","Data":"d79370f6d7e59edd78d5a700e70a2b5baeefef3f5eefcf5ef7601d245ce83e6a"} Nov 25 10:02:49 crc kubenswrapper[4854]: I1125 10:02:49.302161 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e73606dc-c7c0-4d1e-9f87-5effe3a03611","Type":"ContainerStarted","Data":"a534ad91e3753628b1a94f6899f70069d788cd7d5bc3748631307ed3e4f38286"} Nov 25 10:02:49 crc kubenswrapper[4854]: I1125 10:02:49.304987 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:02:49 crc kubenswrapper[4854]: I1125 10:02:49.330858 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.927617708 podStartE2EDuration="14.330839629s" podCreationTimestamp="2025-11-25 10:02:35 +0000 UTC" firstStartedPulling="2025-11-25 10:02:40.309365842 +0000 UTC m=+1566.162359218" lastFinishedPulling="2025-11-25 10:02:48.712587763 +0000 UTC m=+1574.565581139" observedRunningTime="2025-11-25 10:02:49.325188652 +0000 UTC m=+1575.178182038" watchObservedRunningTime="2025-11-25 10:02:49.330839629 +0000 UTC m=+1575.183832995" Nov 25 10:02:50 crc kubenswrapper[4854]: I1125 10:02:50.688141 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:02:50 crc kubenswrapper[4854]: I1125 10:02:50.786211 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-gzlbt"] Nov 25 10:02:50 crc kubenswrapper[4854]: I1125 10:02:50.786495 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" podUID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerName="dnsmasq-dns" containerID="cri-o://64a53c29fbd1aeaf9a1fa6e01b49af054dddfb813c762a8e7658256c5b35821d" gracePeriod=10 Nov 25 10:02:50 crc kubenswrapper[4854]: 
I1125 10:02:50.973709 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-mjfvn"] Nov 25 10:02:50 crc kubenswrapper[4854]: I1125 10:02:50.975889 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:50 crc kubenswrapper[4854]: I1125 10:02:50.995487 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-mjfvn"] Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.144213 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.144378 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.144430 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bksz\" (UniqueName: \"kubernetes.io/projected/51592fc8-630b-49d8-979a-d1ad4c3962f6-kube-api-access-2bksz\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.144544 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.144601 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.144802 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-ovsdbserver-nb\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.144909 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-config\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.247366 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.247418 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.247451 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-ovsdbserver-nb\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.247506 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-config\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.247578 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.247620 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.247643 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bksz\" (UniqueName: \"kubernetes.io/projected/51592fc8-630b-49d8-979a-d1ad4c3962f6-kube-api-access-2bksz\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.252980 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.253752 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.254259 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-ovsdbserver-nb\") 
pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.254846 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-config\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.255595 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.255857 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/51592fc8-630b-49d8-979a-d1ad4c3962f6-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.292578 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bksz\" (UniqueName: \"kubernetes.io/projected/51592fc8-630b-49d8-979a-d1ad4c3962f6-kube-api-access-2bksz\") pod \"dnsmasq-dns-5d75f767dc-mjfvn\" (UID: \"51592fc8-630b-49d8-979a-d1ad4c3962f6\") " pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.342600 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.389954 4854 generic.go:334] "Generic (PLEG): container finished" podID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerID="64a53c29fbd1aeaf9a1fa6e01b49af054dddfb813c762a8e7658256c5b35821d" exitCode=0 Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.390009 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" event={"ID":"b9e714ae-48fc-4b5a-97b5-faa9a902e431","Type":"ContainerDied","Data":"64a53c29fbd1aeaf9a1fa6e01b49af054dddfb813c762a8e7658256c5b35821d"} Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.603423 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.760871 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-config\") pod \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.761106 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-swift-storage-0\") pod \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.761125 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-svc\") pod \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.761162 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-sb\") pod \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.761274 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmsjh\" (UniqueName: \"kubernetes.io/projected/b9e714ae-48fc-4b5a-97b5-faa9a902e431-kube-api-access-gmsjh\") pod \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.761309 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-nb\") pod \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\" (UID: \"b9e714ae-48fc-4b5a-97b5-faa9a902e431\") " Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.768056 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9e714ae-48fc-4b5a-97b5-faa9a902e431-kube-api-access-gmsjh" (OuterVolumeSpecName: "kube-api-access-gmsjh") pod "b9e714ae-48fc-4b5a-97b5-faa9a902e431" (UID: "b9e714ae-48fc-4b5a-97b5-faa9a902e431"). InnerVolumeSpecName "kube-api-access-gmsjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.827964 4854 scope.go:117] "RemoveContainer" containerID="2bad04825ff422fba7e27adfcfde3a3f15ea7f17719c61e4b194e5b6c44d2e2e" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.832427 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-config" (OuterVolumeSpecName: "config") pod "b9e714ae-48fc-4b5a-97b5-faa9a902e431" (UID: "b9e714ae-48fc-4b5a-97b5-faa9a902e431"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.846315 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b9e714ae-48fc-4b5a-97b5-faa9a902e431" (UID: "b9e714ae-48fc-4b5a-97b5-faa9a902e431"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.851521 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b9e714ae-48fc-4b5a-97b5-faa9a902e431" (UID: "b9e714ae-48fc-4b5a-97b5-faa9a902e431"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.860012 4854 scope.go:117] "RemoveContainer" containerID="dab93c541d07e9db5e519b4a7847185e348f04d7812adc0288dedc1c982fc1d0" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.862141 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b9e714ae-48fc-4b5a-97b5-faa9a902e431" (UID: "b9e714ae-48fc-4b5a-97b5-faa9a902e431"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.865430 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.865457 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmsjh\" (UniqueName: \"kubernetes.io/projected/b9e714ae-48fc-4b5a-97b5-faa9a902e431-kube-api-access-gmsjh\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.865468 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.865477 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.865492 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.869125 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b9e714ae-48fc-4b5a-97b5-faa9a902e431" (UID: "b9e714ae-48fc-4b5a-97b5-faa9a902e431"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:02:51 crc kubenswrapper[4854]: I1125 10:02:51.967574 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9e714ae-48fc-4b5a-97b5-faa9a902e431-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.003802 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-mjfvn"] Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.421740 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-hcn8p" event={"ID":"7909b58c-9614-4859-b5c9-be2fd2c77fc8","Type":"ContainerStarted","Data":"58304f7ced76aa1063423fc3490d0ef5b0eab8902366ec37e91c5e35090eda2c"} Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.430469 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" event={"ID":"b9e714ae-48fc-4b5a-97b5-faa9a902e431","Type":"ContainerDied","Data":"63ba9027cd9b047ca15b15ac4e5aff4cddf08f32efe5b355c786619e1df3b9f6"} Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.430533 4854 scope.go:117] "RemoveContainer" containerID="64a53c29fbd1aeaf9a1fa6e01b49af054dddfb813c762a8e7658256c5b35821d" Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.430735 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-gzlbt" Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.437142 4854 generic.go:334] "Generic (PLEG): container finished" podID="51592fc8-630b-49d8-979a-d1ad4c3962f6" containerID="9043d3c87d8fe7ddb4222dae0e576138ca499f27d79b8fcf35511979d64754d5" exitCode=0 Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.437317 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" event={"ID":"51592fc8-630b-49d8-979a-d1ad4c3962f6","Type":"ContainerDied","Data":"9043d3c87d8fe7ddb4222dae0e576138ca499f27d79b8fcf35511979d64754d5"} Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.437354 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" event={"ID":"51592fc8-630b-49d8-979a-d1ad4c3962f6","Type":"ContainerStarted","Data":"70daab06cb2d3ddfbad6704d9e5e5eee63c03cf0b4a7d319e1977242b076544a"} Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.454795 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-hcn8p" podStartSLOduration=2.696555741 podStartE2EDuration="32.454774482s" podCreationTimestamp="2025-11-25 10:02:20 +0000 UTC" firstStartedPulling="2025-11-25 10:02:21.492107316 +0000 UTC m=+1547.345100692" lastFinishedPulling="2025-11-25 10:02:51.250326057 +0000 UTC m=+1577.103319433" observedRunningTime="2025-11-25 10:02:52.443208242 +0000 UTC m=+1578.296201628" watchObservedRunningTime="2025-11-25 10:02:52.454774482 +0000 UTC m=+1578.307767858" Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.493217 4854 scope.go:117] "RemoveContainer" containerID="42b8951e042daee0ad7de783340763a95f09a13dc766c228bd858b7335b14bf5" Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.546735 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-gzlbt"] Nov 25 10:02:52 crc kubenswrapper[4854]: I1125 10:02:52.572906 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-gzlbt"] Nov 25 10:02:53 crc kubenswrapper[4854]: I1125 10:02:53.029207 4854 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" path="/var/lib/kubelet/pods/b9e714ae-48fc-4b5a-97b5-faa9a902e431/volumes" Nov 25 10:02:53 crc kubenswrapper[4854]: I1125 10:02:53.449859 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" event={"ID":"51592fc8-630b-49d8-979a-d1ad4c3962f6","Type":"ContainerStarted","Data":"d52083a779354beec8c886305f570c14a619c9c5d993714fd2ea3eda1c08eeec"} Nov 25 10:02:53 crc kubenswrapper[4854]: I1125 10:02:53.451308 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:02:53 crc kubenswrapper[4854]: I1125 10:02:53.482876 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" podStartSLOduration=3.482860387 podStartE2EDuration="3.482860387s" podCreationTimestamp="2025-11-25 10:02:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:02:53.471114242 +0000 UTC m=+1579.324107628" watchObservedRunningTime="2025-11-25 10:02:53.482860387 +0000 UTC m=+1579.335853763" Nov 25 10:02:54 crc kubenswrapper[4854]: I1125 10:02:54.468096 4854 generic.go:334] "Generic (PLEG): container finished" podID="7909b58c-9614-4859-b5c9-be2fd2c77fc8" containerID="58304f7ced76aa1063423fc3490d0ef5b0eab8902366ec37e91c5e35090eda2c" exitCode=0 Nov 25 10:02:54 crc kubenswrapper[4854]: I1125 10:02:54.468168 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-hcn8p" event={"ID":"7909b58c-9614-4859-b5c9-be2fd2c77fc8","Type":"ContainerDied","Data":"58304f7ced76aa1063423fc3490d0ef5b0eab8902366ec37e91c5e35090eda2c"} Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.028358 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.028411 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.028446 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.029302 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.029378 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" 
containerID="cri-o://e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" gracePeriod=600 Nov 25 10:02:55 crc kubenswrapper[4854]: E1125 10:02:55.153798 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.487702 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef"} Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.487774 4854 scope.go:117] "RemoveContainer" containerID="65f6bcfa40b1e5bbb70c379bd608e17d8c0ff4d22430507df2078040825b6744" Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.487894 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" exitCode=0 Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.489068 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:02:55 crc kubenswrapper[4854]: E1125 10:02:55.489969 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:02:55 crc kubenswrapper[4854]: I1125 10:02:55.978602 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.076401 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xg9db\" (UniqueName: \"kubernetes.io/projected/7909b58c-9614-4859-b5c9-be2fd2c77fc8-kube-api-access-xg9db\") pod \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.076574 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-combined-ca-bundle\") pod \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.076751 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data\") pod \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.082461 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7909b58c-9614-4859-b5c9-be2fd2c77fc8-kube-api-access-xg9db" (OuterVolumeSpecName: "kube-api-access-xg9db") pod "7909b58c-9614-4859-b5c9-be2fd2c77fc8" (UID: "7909b58c-9614-4859-b5c9-be2fd2c77fc8"). InnerVolumeSpecName "kube-api-access-xg9db". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.122797 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7909b58c-9614-4859-b5c9-be2fd2c77fc8" (UID: "7909b58c-9614-4859-b5c9-be2fd2c77fc8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.178871 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data" (OuterVolumeSpecName: "config-data") pod "7909b58c-9614-4859-b5c9-be2fd2c77fc8" (UID: "7909b58c-9614-4859-b5c9-be2fd2c77fc8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.179950 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data\") pod \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\" (UID: \"7909b58c-9614-4859-b5c9-be2fd2c77fc8\") " Nov 25 10:02:56 crc kubenswrapper[4854]: W1125 10:02:56.180793 4854 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/7909b58c-9614-4859-b5c9-be2fd2c77fc8/volumes/kubernetes.io~secret/config-data Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.180816 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data" (OuterVolumeSpecName: "config-data") pod "7909b58c-9614-4859-b5c9-be2fd2c77fc8" (UID: "7909b58c-9614-4859-b5c9-be2fd2c77fc8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.182391 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xg9db\" (UniqueName: \"kubernetes.io/projected/7909b58c-9614-4859-b5c9-be2fd2c77fc8-kube-api-access-xg9db\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.182422 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.182435 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7909b58c-9614-4859-b5c9-be2fd2c77fc8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.505084 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-hcn8p" event={"ID":"7909b58c-9614-4859-b5c9-be2fd2c77fc8","Type":"ContainerDied","Data":"15a424c324269ae502de1397a46ca1cf371d3f4b28cb2c55bcb5e9df6403ca33"} Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.505133 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15a424c324269ae502de1397a46ca1cf371d3f4b28cb2c55bcb5e9df6403ca33" Nov 25 10:02:56 crc kubenswrapper[4854]: I1125 10:02:56.505141 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-hcn8p" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.485357 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-5688985587-fs97x"] Nov 25 10:02:57 crc kubenswrapper[4854]: E1125 10:02:57.486080 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerName="init" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.486103 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerName="init" Nov 25 10:02:57 crc kubenswrapper[4854]: E1125 10:02:57.486135 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7909b58c-9614-4859-b5c9-be2fd2c77fc8" containerName="heat-db-sync" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.486144 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7909b58c-9614-4859-b5c9-be2fd2c77fc8" containerName="heat-db-sync" Nov 25 10:02:57 crc kubenswrapper[4854]: E1125 10:02:57.486166 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerName="dnsmasq-dns" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.486175 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerName="dnsmasq-dns" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.486433 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="7909b58c-9614-4859-b5c9-be2fd2c77fc8" containerName="heat-db-sync" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.486483 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9e714ae-48fc-4b5a-97b5-faa9a902e431" containerName="dnsmasq-dns" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.487511 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.506105 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5688985587-fs97x"] Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.555747 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-77db595d79-zsh9l"] Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.557857 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.579436 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5f59c54579-mqcrl"] Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.581226 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.603591 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-77db595d79-zsh9l"] Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.619295 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-config-data\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.619409 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-config-data\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.619502 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-config-data-custom\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.619773 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-combined-ca-bundle\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.619920 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-internal-tls-certs\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.619942 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jdbk\" (UniqueName: \"kubernetes.io/projected/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-kube-api-access-6jdbk\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 
10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.620040 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-config-data-custom\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.620074 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-public-tls-certs\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.620207 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-combined-ca-bundle\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.620272 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhztd\" (UniqueName: \"kubernetes.io/projected/935ce673-59b2-4651-9193-12afdb60ed71-kube-api-access-lhztd\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.633430 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5f59c54579-mqcrl"] Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.722791 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-config-data-custom\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723179 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-combined-ca-bundle\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723211 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-config-data\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723239 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-public-tls-certs\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723290 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-lhztd\" (UniqueName: \"kubernetes.io/projected/935ce673-59b2-4651-9193-12afdb60ed71-kube-api-access-lhztd\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723316 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-combined-ca-bundle\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723340 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c6nj\" (UniqueName: \"kubernetes.io/projected/0bdea020-6056-4c2f-afb7-c715fb1116c0-kube-api-access-2c6nj\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723412 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-config-data\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723472 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-config-data\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723504 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-config-data-custom\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723609 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-combined-ca-bundle\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723694 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-internal-tls-certs\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723719 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jdbk\" (UniqueName: \"kubernetes.io/projected/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-kube-api-access-6jdbk\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723770 4854 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-internal-tls-certs\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723804 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-config-data-custom\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.723833 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-public-tls-certs\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.729453 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-combined-ca-bundle\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.729899 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-internal-tls-certs\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.731432 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-combined-ca-bundle\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.731654 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-config-data\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.733375 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-config-data-custom\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.733998 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/935ce673-59b2-4651-9193-12afdb60ed71-config-data-custom\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.734352 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-public-tls-certs\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.742213 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-config-data\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.746624 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jdbk\" (UniqueName: \"kubernetes.io/projected/5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d-kube-api-access-6jdbk\") pod \"heat-cfnapi-77db595d79-zsh9l\" (UID: \"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d\") " pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.747434 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhztd\" (UniqueName: \"kubernetes.io/projected/935ce673-59b2-4651-9193-12afdb60ed71-kube-api-access-lhztd\") pod \"heat-engine-5688985587-fs97x\" (UID: \"935ce673-59b2-4651-9193-12afdb60ed71\") " pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.807162 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.827222 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-internal-tls-certs\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.827313 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-config-data-custom\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.827361 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-config-data\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.827389 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-public-tls-certs\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.827427 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-combined-ca-bundle\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: 
I1125 10:02:57.827451 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c6nj\" (UniqueName: \"kubernetes.io/projected/0bdea020-6056-4c2f-afb7-c715fb1116c0-kube-api-access-2c6nj\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.832191 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-combined-ca-bundle\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.834330 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-config-data-custom\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.838009 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-internal-tls-certs\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.839263 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-config-data\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.841489 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bdea020-6056-4c2f-afb7-c715fb1116c0-public-tls-certs\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.849415 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c6nj\" (UniqueName: \"kubernetes.io/projected/0bdea020-6056-4c2f-afb7-c715fb1116c0-kube-api-access-2c6nj\") pod \"heat-api-5f59c54579-mqcrl\" (UID: \"0bdea020-6056-4c2f-afb7-c715fb1116c0\") " pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.888693 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:02:57 crc kubenswrapper[4854]: I1125 10:02:57.904227 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:02:58 crc kubenswrapper[4854]: I1125 10:02:58.399748 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5688985587-fs97x"] Nov 25 10:02:58 crc kubenswrapper[4854]: I1125 10:02:58.532381 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5688985587-fs97x" event={"ID":"935ce673-59b2-4651-9193-12afdb60ed71","Type":"ContainerStarted","Data":"b7b031ef27aa6bc25ad1b93955d67c23f15fd48d7487514b5718c51ba83485b7"} Nov 25 10:02:58 crc kubenswrapper[4854]: I1125 10:02:58.561698 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-77db595d79-zsh9l"] Nov 25 10:02:58 crc kubenswrapper[4854]: W1125 10:02:58.569898 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d90bbbc_ccfe_4462_a8b5_3d3c4cbbcf3d.slice/crio-ce1918e7b92a052e2c31347d941e9966df396d70897cb705ecb769f0054b15e9 WatchSource:0}: Error finding container ce1918e7b92a052e2c31347d941e9966df396d70897cb705ecb769f0054b15e9: Status 404 returned error can't find the container with id ce1918e7b92a052e2c31347d941e9966df396d70897cb705ecb769f0054b15e9 Nov 25 10:02:58 crc kubenswrapper[4854]: W1125 10:02:58.662572 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bdea020_6056_4c2f_afb7_c715fb1116c0.slice/crio-505212f10e00b68ad6f3ea3e8a9a92eee82ec321c00e39810cb46234e376be22 WatchSource:0}: Error finding container 505212f10e00b68ad6f3ea3e8a9a92eee82ec321c00e39810cb46234e376be22: Status 404 returned error can't find the container with id 505212f10e00b68ad6f3ea3e8a9a92eee82ec321c00e39810cb46234e376be22 Nov 25 10:02:58 crc kubenswrapper[4854]: I1125 10:02:58.668478 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5f59c54579-mqcrl"] Nov 25 10:02:59 crc kubenswrapper[4854]: I1125 10:02:59.543905 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-77db595d79-zsh9l" event={"ID":"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d","Type":"ContainerStarted","Data":"ce1918e7b92a052e2c31347d941e9966df396d70897cb705ecb769f0054b15e9"} Nov 25 10:02:59 crc kubenswrapper[4854]: I1125 10:02:59.545469 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5f59c54579-mqcrl" event={"ID":"0bdea020-6056-4c2f-afb7-c715fb1116c0","Type":"ContainerStarted","Data":"505212f10e00b68ad6f3ea3e8a9a92eee82ec321c00e39810cb46234e376be22"} Nov 25 10:02:59 crc kubenswrapper[4854]: I1125 10:02:59.556994 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5688985587-fs97x" event={"ID":"935ce673-59b2-4651-9193-12afdb60ed71","Type":"ContainerStarted","Data":"546cc1b868a9fd947d7706cd3205672bb14fd4967b6177ebe940777b89799e8d"} Nov 25 10:02:59 crc kubenswrapper[4854]: I1125 10:02:59.557233 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:03:01 crc kubenswrapper[4854]: I1125 10:03:01.346838 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d75f767dc-mjfvn" Nov 25 10:03:01 crc kubenswrapper[4854]: I1125 10:03:01.379691 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-5688985587-fs97x" podStartSLOduration=4.379649935 podStartE2EDuration="4.379649935s" podCreationTimestamp="2025-11-25 10:02:57 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:02:59.580033123 +0000 UTC m=+1585.433026499" watchObservedRunningTime="2025-11-25 10:03:01.379649935 +0000 UTC m=+1587.232643311" Nov 25 10:03:01 crc kubenswrapper[4854]: I1125 10:03:01.420038 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-znnrh"] Nov 25 10:03:01 crc kubenswrapper[4854]: I1125 10:03:01.420411 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" podUID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerName="dnsmasq-dns" containerID="cri-o://20413ca977773f26ecf76393a65f48af507728a83d9868543151a813c3739550" gracePeriod=10 Nov 25 10:03:01 crc kubenswrapper[4854]: I1125 10:03:01.627566 4854 generic.go:334] "Generic (PLEG): container finished" podID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerID="20413ca977773f26ecf76393a65f48af507728a83d9868543151a813c3739550" exitCode=0 Nov 25 10:03:01 crc kubenswrapper[4854]: I1125 10:03:01.627877 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" event={"ID":"f8fac777-00f5-4d15-94d6-c9e0c93ca38e","Type":"ContainerDied","Data":"20413ca977773f26ecf76393a65f48af507728a83d9868543151a813c3739550"} Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.106384 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.264294 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-config\") pod \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.264422 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-sb\") pod \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.264487 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-svc\") pod \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.264996 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7wld\" (UniqueName: \"kubernetes.io/projected/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-kube-api-access-v7wld\") pod \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.265116 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-nb\") pod \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.265153 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-swift-storage-0\") pod \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.265177 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-openstack-edpm-ipam\") pod \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\" (UID: \"f8fac777-00f5-4d15-94d6-c9e0c93ca38e\") " Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.286905 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-kube-api-access-v7wld" (OuterVolumeSpecName: "kube-api-access-v7wld") pod "f8fac777-00f5-4d15-94d6-c9e0c93ca38e" (UID: "f8fac777-00f5-4d15-94d6-c9e0c93ca38e"). InnerVolumeSpecName "kube-api-access-v7wld". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.346743 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f8fac777-00f5-4d15-94d6-c9e0c93ca38e" (UID: "f8fac777-00f5-4d15-94d6-c9e0c93ca38e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.356448 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "f8fac777-00f5-4d15-94d6-c9e0c93ca38e" (UID: "f8fac777-00f5-4d15-94d6-c9e0c93ca38e"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.356742 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-config" (OuterVolumeSpecName: "config") pod "f8fac777-00f5-4d15-94d6-c9e0c93ca38e" (UID: "f8fac777-00f5-4d15-94d6-c9e0c93ca38e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.368471 4854 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.368506 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.368517 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7wld\" (UniqueName: \"kubernetes.io/projected/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-kube-api-access-v7wld\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.368525 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.374034 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f8fac777-00f5-4d15-94d6-c9e0c93ca38e" (UID: "f8fac777-00f5-4d15-94d6-c9e0c93ca38e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.381343 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f8fac777-00f5-4d15-94d6-c9e0c93ca38e" (UID: "f8fac777-00f5-4d15-94d6-c9e0c93ca38e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.384270 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f8fac777-00f5-4d15-94d6-c9e0c93ca38e" (UID: "f8fac777-00f5-4d15-94d6-c9e0c93ca38e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.416377 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="a0da5a5f-c737-4ad7-a34a-fbad8d68866f" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.1.5:3000/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.471065 4854 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.471112 4854 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.471126 4854 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8fac777-00f5-4d15-94d6-c9e0c93ca38e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.662918 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" event={"ID":"f8fac777-00f5-4d15-94d6-c9e0c93ca38e","Type":"ContainerDied","Data":"80701b5437b2bcd3798cc66ca0619fc9ef5fe976435b4342e6348c7c7c4cd2f0"} Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.662974 4854 scope.go:117] "RemoveContainer" containerID="20413ca977773f26ecf76393a65f48af507728a83d9868543151a813c3739550" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.663147 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-znnrh" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.679947 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-77db595d79-zsh9l" event={"ID":"5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d","Type":"ContainerStarted","Data":"54c7e51bdef932fe51f586724cc9430f286d6033e299700a39bfc41ffddf30b8"} Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.681323 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.702548 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5f59c54579-mqcrl" event={"ID":"0bdea020-6056-4c2f-afb7-c715fb1116c0","Type":"ContainerStarted","Data":"a537bebf3622421ad64c75c42c451c1bebe64f477f99b9c52726fe7cf1a81629"} Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.703940 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.735752 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-77db595d79-zsh9l" podStartSLOduration=2.735921396 podStartE2EDuration="5.735736295s" podCreationTimestamp="2025-11-25 10:02:57 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.582224786 +0000 UTC m=+1584.435218162" lastFinishedPulling="2025-11-25 10:03:01.582039685 +0000 UTC m=+1587.435033061" observedRunningTime="2025-11-25 10:03:02.715072903 +0000 UTC m=+1588.568066279" watchObservedRunningTime="2025-11-25 10:03:02.735736295 +0000 UTC m=+1588.588729671" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.765230 4854 scope.go:117] "RemoveContainer" containerID="c878571d9e579dadfba58ec19ce280da658c4088b918e1be674661f1643e9295" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.777254 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5f59c54579-mqcrl" podStartSLOduration=2.8515951680000002 podStartE2EDuration="5.777235024s" podCreationTimestamp="2025-11-25 10:02:57 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.66476277 +0000 UTC m=+1584.517756156" lastFinishedPulling="2025-11-25 10:03:01.590402636 +0000 UTC m=+1587.443396012" observedRunningTime="2025-11-25 10:03:02.761128428 +0000 UTC m=+1588.614121824" watchObservedRunningTime="2025-11-25 10:03:02.777235024 +0000 UTC m=+1588.630228400" Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.808211 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-znnrh"] Nov 25 10:03:02 crc kubenswrapper[4854]: I1125 10:03:02.825625 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-znnrh"] Nov 25 10:03:03 crc kubenswrapper[4854]: I1125 10:03:03.028857 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" path="/var/lib/kubelet/pods/f8fac777-00f5-4d15-94d6-c9e0c93ca38e/volumes" Nov 25 10:03:06 crc kubenswrapper[4854]: I1125 10:03:06.045073 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 10:03:09 crc kubenswrapper[4854]: I1125 10:03:09.376609 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-77db595d79-zsh9l" Nov 25 10:03:09 crc kubenswrapper[4854]: I1125 10:03:09.450400 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/heat-cfnapi-756b755c6-wqm2k"] Nov 25 10:03:09 crc kubenswrapper[4854]: I1125 10:03:09.450900 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-756b755c6-wqm2k" podUID="aeb8dcf6-3640-4930-8663-be372820d69c" containerName="heat-cfnapi" containerID="cri-o://7c876d7b8becacb2d7cf0f7a7a2fa2e375c90003ca3092d163dae8f1456e2edc" gracePeriod=60 Nov 25 10:03:09 crc kubenswrapper[4854]: I1125 10:03:09.630356 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-5f59c54579-mqcrl" Nov 25 10:03:09 crc kubenswrapper[4854]: I1125 10:03:09.718514 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7cf6b8b6cd-csh8n"] Nov 25 10:03:09 crc kubenswrapper[4854]: I1125 10:03:09.718770 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-7cf6b8b6cd-csh8n" podUID="57723d21-6e34-4a5b-8063-9d5b97022cfc" containerName="heat-api" containerID="cri-o://01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f" gracePeriod=60 Nov 25 10:03:10 crc kubenswrapper[4854]: I1125 10:03:10.014643 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:03:10 crc kubenswrapper[4854]: E1125 10:03:10.015297 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:03:12 crc kubenswrapper[4854]: I1125 10:03:12.623279 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-756b755c6-wqm2k" podUID="aeb8dcf6-3640-4930-8663-be372820d69c" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.222:8000/healthcheck\": read tcp 10.217.0.2:42190->10.217.0.222:8000: read: connection reset by peer" Nov 25 10:03:12 crc kubenswrapper[4854]: I1125 10:03:12.828380 4854 generic.go:334] "Generic (PLEG): container finished" podID="aeb8dcf6-3640-4930-8663-be372820d69c" containerID="7c876d7b8becacb2d7cf0f7a7a2fa2e375c90003ca3092d163dae8f1456e2edc" exitCode=0 Nov 25 10:03:12 crc kubenswrapper[4854]: I1125 10:03:12.828540 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-756b755c6-wqm2k" event={"ID":"aeb8dcf6-3640-4930-8663-be372820d69c","Type":"ContainerDied","Data":"7c876d7b8becacb2d7cf0f7a7a2fa2e375c90003ca3092d163dae8f1456e2edc"} Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.697273 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.810348 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data-custom\") pod \"57723d21-6e34-4a5b-8063-9d5b97022cfc\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.810599 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-internal-tls-certs\") pod \"57723d21-6e34-4a5b-8063-9d5b97022cfc\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.810717 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data\") pod \"57723d21-6e34-4a5b-8063-9d5b97022cfc\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.811310 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74v5b\" (UniqueName: \"kubernetes.io/projected/57723d21-6e34-4a5b-8063-9d5b97022cfc-kube-api-access-74v5b\") pod \"57723d21-6e34-4a5b-8063-9d5b97022cfc\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.811442 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-combined-ca-bundle\") pod \"57723d21-6e34-4a5b-8063-9d5b97022cfc\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.811589 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-public-tls-certs\") pod \"57723d21-6e34-4a5b-8063-9d5b97022cfc\" (UID: \"57723d21-6e34-4a5b-8063-9d5b97022cfc\") " Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.820963 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57723d21-6e34-4a5b-8063-9d5b97022cfc-kube-api-access-74v5b" (OuterVolumeSpecName: "kube-api-access-74v5b") pod "57723d21-6e34-4a5b-8063-9d5b97022cfc" (UID: "57723d21-6e34-4a5b-8063-9d5b97022cfc"). InnerVolumeSpecName "kube-api-access-74v5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.822290 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "57723d21-6e34-4a5b-8063-9d5b97022cfc" (UID: "57723d21-6e34-4a5b-8063-9d5b97022cfc"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.879855 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57723d21-6e34-4a5b-8063-9d5b97022cfc" (UID: "57723d21-6e34-4a5b-8063-9d5b97022cfc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.880773 4854 generic.go:334] "Generic (PLEG): container finished" podID="57723d21-6e34-4a5b-8063-9d5b97022cfc" containerID="01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f" exitCode=0 Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.880879 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7cf6b8b6cd-csh8n" event={"ID":"57723d21-6e34-4a5b-8063-9d5b97022cfc","Type":"ContainerDied","Data":"01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f"} Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.880911 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7cf6b8b6cd-csh8n" event={"ID":"57723d21-6e34-4a5b-8063-9d5b97022cfc","Type":"ContainerDied","Data":"e1d930f79b3bc94c3a9a9bef5c8dc4358d8c98cd7cb9aab0b22f84f31c1131fa"} Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.880930 4854 scope.go:117] "RemoveContainer" containerID="01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.881105 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7cf6b8b6cd-csh8n" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.899170 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-756b755c6-wqm2k" event={"ID":"aeb8dcf6-3640-4930-8663-be372820d69c","Type":"ContainerDied","Data":"77e544bca9d533d4f5127bd91c0be1c0a1e8faa48fc0f8dbe455efa37408624f"} Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.899210 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77e544bca9d533d4f5127bd91c0be1c0a1e8faa48fc0f8dbe455efa37408624f" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.914795 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "57723d21-6e34-4a5b-8063-9d5b97022cfc" (UID: "57723d21-6e34-4a5b-8063-9d5b97022cfc"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.915535 4854 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.915567 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.915580 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74v5b\" (UniqueName: \"kubernetes.io/projected/57723d21-6e34-4a5b-8063-9d5b97022cfc-kube-api-access-74v5b\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.915594 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.919306 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "57723d21-6e34-4a5b-8063-9d5b97022cfc" (UID: "57723d21-6e34-4a5b-8063-9d5b97022cfc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:13 crc kubenswrapper[4854]: I1125 10:03:13.925872 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data" (OuterVolumeSpecName: "config-data") pod "57723d21-6e34-4a5b-8063-9d5b97022cfc" (UID: "57723d21-6e34-4a5b-8063-9d5b97022cfc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.018091 4854 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.018122 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57723d21-6e34-4a5b-8063-9d5b97022cfc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.035482 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.045293 4854 scope.go:117] "RemoveContainer" containerID="01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f" Nov 25 10:03:14 crc kubenswrapper[4854]: E1125 10:03:14.045890 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f\": container with ID starting with 01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f not found: ID does not exist" containerID="01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.045932 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f"} err="failed to get container status \"01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f\": rpc error: code = NotFound desc = could not find container \"01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f\": container with ID starting with 01de8bc75edf7fe46d55ccf6004435ed8d21050212f5db0f8d6b3f61509c663f not found: ID does not exist" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.119537 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data\") pod \"aeb8dcf6-3640-4930-8663-be372820d69c\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.119612 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-public-tls-certs\") pod \"aeb8dcf6-3640-4930-8663-be372820d69c\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.119651 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-internal-tls-certs\") pod \"aeb8dcf6-3640-4930-8663-be372820d69c\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.119774 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-combined-ca-bundle\") pod \"aeb8dcf6-3640-4930-8663-be372820d69c\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.119806 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k87cl\" (UniqueName: \"kubernetes.io/projected/aeb8dcf6-3640-4930-8663-be372820d69c-kube-api-access-k87cl\") pod \"aeb8dcf6-3640-4930-8663-be372820d69c\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.119936 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data-custom\") pod \"aeb8dcf6-3640-4930-8663-be372820d69c\" (UID: \"aeb8dcf6-3640-4930-8663-be372820d69c\") " Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.137002 4854 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "aeb8dcf6-3640-4930-8663-be372820d69c" (UID: "aeb8dcf6-3640-4930-8663-be372820d69c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.160499 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aeb8dcf6-3640-4930-8663-be372820d69c-kube-api-access-k87cl" (OuterVolumeSpecName: "kube-api-access-k87cl") pod "aeb8dcf6-3640-4930-8663-be372820d69c" (UID: "aeb8dcf6-3640-4930-8663-be372820d69c"). InnerVolumeSpecName "kube-api-access-k87cl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.215314 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "aeb8dcf6-3640-4930-8663-be372820d69c" (UID: "aeb8dcf6-3640-4930-8663-be372820d69c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.220457 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aeb8dcf6-3640-4930-8663-be372820d69c" (UID: "aeb8dcf6-3640-4930-8663-be372820d69c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.229024 4854 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.229051 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.229060 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k87cl\" (UniqueName: \"kubernetes.io/projected/aeb8dcf6-3640-4930-8663-be372820d69c-kube-api-access-k87cl\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.229071 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.231852 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data" (OuterVolumeSpecName: "config-data") pod "aeb8dcf6-3640-4930-8663-be372820d69c" (UID: "aeb8dcf6-3640-4930-8663-be372820d69c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.234313 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "aeb8dcf6-3640-4930-8663-be372820d69c" (UID: "aeb8dcf6-3640-4930-8663-be372820d69c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.294703 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7cf6b8b6cd-csh8n"] Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.305192 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-7cf6b8b6cd-csh8n"] Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.331531 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.331562 4854 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aeb8dcf6-3640-4930-8663-be372820d69c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.923749 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-756b755c6-wqm2k" Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.961863 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-756b755c6-wqm2k"] Nov 25 10:03:14 crc kubenswrapper[4854]: I1125 10:03:14.974706 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-756b755c6-wqm2k"] Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.026924 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57723d21-6e34-4a5b-8063-9d5b97022cfc" path="/var/lib/kubelet/pods/57723d21-6e34-4a5b-8063-9d5b97022cfc/volumes" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.027508 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aeb8dcf6-3640-4930-8663-be372820d69c" path="/var/lib/kubelet/pods/aeb8dcf6-3640-4930-8663-be372820d69c/volumes" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.838681 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb"] Nov 25 10:03:15 crc kubenswrapper[4854]: E1125 10:03:15.839508 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aeb8dcf6-3640-4930-8663-be372820d69c" containerName="heat-cfnapi" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.839552 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="aeb8dcf6-3640-4930-8663-be372820d69c" containerName="heat-cfnapi" Nov 25 10:03:15 crc kubenswrapper[4854]: E1125 10:03:15.839566 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57723d21-6e34-4a5b-8063-9d5b97022cfc" containerName="heat-api" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.839573 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="57723d21-6e34-4a5b-8063-9d5b97022cfc" containerName="heat-api" Nov 25 10:03:15 crc kubenswrapper[4854]: E1125 10:03:15.839616 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerName="dnsmasq-dns" Nov 25 10:03:15 crc 
kubenswrapper[4854]: I1125 10:03:15.839622 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerName="dnsmasq-dns" Nov 25 10:03:15 crc kubenswrapper[4854]: E1125 10:03:15.839634 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerName="init" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.839640 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerName="init" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.839881 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8fac777-00f5-4d15-94d6-c9e0c93ca38e" containerName="dnsmasq-dns" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.839907 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="57723d21-6e34-4a5b-8063-9d5b97022cfc" containerName="heat-api" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.839919 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="aeb8dcf6-3640-4930-8663-be372820d69c" containerName="heat-cfnapi" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.840865 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.843524 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.844011 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.844124 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.844409 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.852574 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb"] Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.975803 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.975981 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.976082 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fvgf\" (UniqueName: \"kubernetes.io/projected/10f4bf87-5b7e-4077-8d81-13f86562549e-kube-api-access-9fvgf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: 
\"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:15 crc kubenswrapper[4854]: I1125 10:03:15.976139 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.078279 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.078426 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.078519 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fvgf\" (UniqueName: \"kubernetes.io/projected/10f4bf87-5b7e-4077-8d81-13f86562549e-kube-api-access-9fvgf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.078577 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.084606 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.089175 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.091653 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.101019 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fvgf\" (UniqueName: \"kubernetes.io/projected/10f4bf87-5b7e-4077-8d81-13f86562549e-kube-api-access-9fvgf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.161155 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.951749 4854 generic.go:334] "Generic (PLEG): container finished" podID="bb562532-b4b2-42ad-9d8e-a9b230a3bcf5" containerID="a5a5abb0d1685956efb0a36a9f42ccc65101244e88934194a9db13ad768b7250" exitCode=0 Nov 25 10:03:16 crc kubenswrapper[4854]: I1125 10:03:16.951946 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5","Type":"ContainerDied","Data":"a5a5abb0d1685956efb0a36a9f42ccc65101244e88934194a9db13ad768b7250"} Nov 25 10:03:17 crc kubenswrapper[4854]: W1125 10:03:17.023241 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10f4bf87_5b7e_4077_8d81_13f86562549e.slice/crio-8ceb515b5c623462d87ef0fd6109cef2146a2fd4f6bc3300659a2f6745de6dbb WatchSource:0}: Error finding container 8ceb515b5c623462d87ef0fd6109cef2146a2fd4f6bc3300659a2f6745de6dbb: Status 404 returned error can't find the container with id 8ceb515b5c623462d87ef0fd6109cef2146a2fd4f6bc3300659a2f6745de6dbb Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.080744 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb"] Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.850825 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-5688985587-fs97x" Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.920560 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-f5c9d8946-qfdms"] Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.921031 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-f5c9d8946-qfdms" podUID="15d8094a-7d5b-4f52-8ef0-388820ead440" containerName="heat-engine" containerID="cri-o://73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" gracePeriod=60 Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.974657 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" event={"ID":"10f4bf87-5b7e-4077-8d81-13f86562549e","Type":"ContainerStarted","Data":"8ceb515b5c623462d87ef0fd6109cef2146a2fd4f6bc3300659a2f6745de6dbb"} Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.983686 4854 generic.go:334] "Generic (PLEG): container finished" podID="fdceb63d-e366-47f2-954d-29730788adbb" containerID="57ca1baa8bef0efad3056f83d970f90ef8e6c7a71b6a9ca8b6aea8c3b958d44b" exitCode=0 Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.983816 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" 
event={"ID":"fdceb63d-e366-47f2-954d-29730788adbb","Type":"ContainerDied","Data":"57ca1baa8bef0efad3056f83d970f90ef8e6c7a71b6a9ca8b6aea8c3b958d44b"} Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.987642 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bb562532-b4b2-42ad-9d8e-a9b230a3bcf5","Type":"ContainerStarted","Data":"66345ef866a9e88700a45607438dc2a6f952ab8ab16849657005ce6ac1677cda"} Nov 25 10:03:17 crc kubenswrapper[4854]: I1125 10:03:17.988984 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:03:18 crc kubenswrapper[4854]: I1125 10:03:18.114691 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.11465808 podStartE2EDuration="38.11465808s" podCreationTimestamp="2025-11-25 10:02:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:03:18.062404014 +0000 UTC m=+1603.915397390" watchObservedRunningTime="2025-11-25 10:03:18.11465808 +0000 UTC m=+1603.967651456" Nov 25 10:03:19 crc kubenswrapper[4854]: I1125 10:03:19.422812 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"fdceb63d-e366-47f2-954d-29730788adbb","Type":"ContainerStarted","Data":"a92893415d0a1aa41a587cf32da62f16fb2ca0f8c8e79dd7f282ec26f7a8d24d"} Nov 25 10:03:19 crc kubenswrapper[4854]: I1125 10:03:19.424994 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Nov 25 10:03:19 crc kubenswrapper[4854]: I1125 10:03:19.458209 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=39.458194102 podStartE2EDuration="39.458194102s" podCreationTimestamp="2025-11-25 10:02:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:03:19.457516514 +0000 UTC m=+1605.310509890" watchObservedRunningTime="2025-11-25 10:03:19.458194102 +0000 UTC m=+1605.311187478" Nov 25 10:03:20 crc kubenswrapper[4854]: E1125 10:03:20.140630 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:03:20 crc kubenswrapper[4854]: E1125 10:03:20.142019 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:03:20 crc kubenswrapper[4854]: E1125 10:03:20.143193 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:03:20 crc kubenswrapper[4854]: E1125 10:03:20.143226 4854 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot 
register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-f5c9d8946-qfdms" podUID="15d8094a-7d5b-4f52-8ef0-388820ead440" containerName="heat-engine" Nov 25 10:03:20 crc kubenswrapper[4854]: I1125 10:03:20.817732 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-cbwwc"] Nov 25 10:03:20 crc kubenswrapper[4854]: I1125 10:03:20.830222 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-cbwwc"] Nov 25 10:03:20 crc kubenswrapper[4854]: I1125 10:03:20.974227 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-m27ln"] Nov 25 10:03:20 crc kubenswrapper[4854]: I1125 10:03:20.976210 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:20 crc kubenswrapper[4854]: I1125 10:03:20.996806 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:03:20 crc kubenswrapper[4854]: I1125 10:03:20.997039 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-m27ln"] Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.052663 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e005c0f-47c9-4e9a-a788-6a9ec4c8db31" path="/var/lib/kubelet/pods/5e005c0f-47c9-4e9a-a788-6a9ec4c8db31/volumes" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.113534 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfxww\" (UniqueName: \"kubernetes.io/projected/8be70514-ed7b-499b-acb7-c973bd2590c2-kube-api-access-pfxww\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.113836 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-scripts\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.113879 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-config-data\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.113903 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-combined-ca-bundle\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.218419 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-scripts\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.218504 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-config-data\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.218528 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-combined-ca-bundle\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.218764 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfxww\" (UniqueName: \"kubernetes.io/projected/8be70514-ed7b-499b-acb7-c973bd2590c2-kube-api-access-pfxww\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.225609 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-scripts\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.227596 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-config-data\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.228206 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-combined-ca-bundle\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.259105 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfxww\" (UniqueName: \"kubernetes.io/projected/8be70514-ed7b-499b-acb7-c973bd2590c2-kube-api-access-pfxww\") pod \"aodh-db-sync-m27ln\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:21 crc kubenswrapper[4854]: I1125 10:03:21.319775 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:22 crc kubenswrapper[4854]: I1125 10:03:22.334961 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-m27ln"] Nov 25 10:03:22 crc kubenswrapper[4854]: I1125 10:03:22.467849 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-m27ln" event={"ID":"8be70514-ed7b-499b-acb7-c973bd2590c2","Type":"ContainerStarted","Data":"46511c88abe06384de2ef27f2bb207b4ee63ec6aca40b9136ee7de5e285b0fc3"} Nov 25 10:03:23 crc kubenswrapper[4854]: I1125 10:03:23.013536 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:03:23 crc kubenswrapper[4854]: E1125 10:03:23.013906 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:03:30 crc kubenswrapper[4854]: E1125 10:03:30.141781 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:03:30 crc kubenswrapper[4854]: E1125 10:03:30.143369 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:03:30 crc kubenswrapper[4854]: E1125 10:03:30.144429 4854 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:03:30 crc kubenswrapper[4854]: E1125 10:03:30.144494 4854 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-f5c9d8946-qfdms" podUID="15d8094a-7d5b-4f52-8ef0-388820ead440" containerName="heat-engine" Nov 25 10:03:30 crc kubenswrapper[4854]: I1125 10:03:30.816656 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="bb562532-b4b2-42ad-9d8e-a9b230a3bcf5" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.14:5671: connect: connection refused" Nov 25 10:03:31 crc kubenswrapper[4854]: I1125 10:03:31.585315 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="fdceb63d-e366-47f2-954d-29730788adbb" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.15:5671: connect: connection refused" Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.066487 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest" Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.067048 4854 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 25 10:03:37 crc kubenswrapper[4854]: container &Container{Name:repo-setup-edpm-deployment-openstack-edpm-ipam,Image:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,Command:[],Args:[ansible-runner run /runner -p playbook.yaml -i repo-setup-edpm-deployment-openstack-edpm-ipam],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:ANSIBLE_VERBOSITY,Value:2,ValueFrom:nil,},EnvVar{Name:RUNNER_PLAYBOOK,Value: Nov 25 10:03:37 crc kubenswrapper[4854]: - hosts: all Nov 25 10:03:37 crc kubenswrapper[4854]: strategy: linear Nov 25 10:03:37 crc kubenswrapper[4854]: tasks: Nov 25 10:03:37 crc kubenswrapper[4854]: - name: Enable podified-repos Nov 25 10:03:37 crc kubenswrapper[4854]: become: true Nov 25 10:03:37 crc kubenswrapper[4854]: ansible.builtin.shell: | Nov 25 10:03:37 crc kubenswrapper[4854]: set -euxo pipefail Nov 25 10:03:37 crc kubenswrapper[4854]: pushd /var/tmp Nov 25 10:03:37 crc kubenswrapper[4854]: curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz Nov 25 10:03:37 crc kubenswrapper[4854]: pushd repo-setup-main Nov 25 10:03:37 crc kubenswrapper[4854]: python3 -m venv ./venv Nov 25 10:03:37 crc kubenswrapper[4854]: PBR_VERSION=0.0.0 ./venv/bin/pip install ./ Nov 25 10:03:37 crc kubenswrapper[4854]: ./venv/bin/repo-setup current-podified -b antelope Nov 25 10:03:37 crc kubenswrapper[4854]: popd Nov 25 10:03:37 crc kubenswrapper[4854]: rm -rf repo-setup-main Nov 25 10:03:37 crc kubenswrapper[4854]: Nov 25 10:03:37 crc kubenswrapper[4854]: Nov 25 10:03:37 crc kubenswrapper[4854]: ,ValueFrom:nil,},EnvVar{Name:RUNNER_EXTRA_VARS,Value: Nov 25 10:03:37 crc kubenswrapper[4854]: edpm_override_hosts: openstack-edpm-ipam Nov 25 10:03:37 crc kubenswrapper[4854]: edpm_service_type: repo-setup Nov 25 10:03:37 crc kubenswrapper[4854]: Nov 25 10:03:37 crc kubenswrapper[4854]: Nov 25 10:03:37 crc kubenswrapper[4854]: 
,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:repo-setup-combined-ca-bundle,ReadOnly:false,MountPath:/var/lib/openstack/cacerts/repo-setup,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/runner/env/ssh_key,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:inventory,ReadOnly:false,MountPath:/runner/inventory/hosts,SubPath:inventory,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9fvgf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:openstack-aee-default-env,},Optional:*true,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb_openstack(10f4bf87-5b7e-4077-8d81-13f86562549e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Nov 25 10:03:37 crc kubenswrapper[4854]: > logger="UnhandledError" Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.068173 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" podUID="10f4bf87-5b7e-4077-8d81-13f86562549e" Nov 25 10:03:37 crc kubenswrapper[4854]: I1125 10:03:37.678541 4854 generic.go:334] "Generic (PLEG): container finished" podID="15d8094a-7d5b-4f52-8ef0-388820ead440" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" exitCode=0 Nov 25 10:03:37 crc kubenswrapper[4854]: I1125 10:03:37.679721 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-f5c9d8946-qfdms" event={"ID":"15d8094a-7d5b-4f52-8ef0-388820ead440","Type":"ContainerDied","Data":"73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9"} Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.681650 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest\\\"\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" podUID="10f4bf87-5b7e-4077-8d81-13f86562549e" Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.788903 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested" Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.789209 4854 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested" Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.789367 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:aodh-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:AodhPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:AodhPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:aodh-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pfxww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42402,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod aodh-db-sync-m27ln_openstack(8be70514-ed7b-499b-acb7-c973bd2590c2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:37 crc kubenswrapper[4854]: E1125 10:03:37.790504 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/aodh-db-sync-m27ln" podUID="8be70514-ed7b-499b-acb7-c973bd2590c2" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.016606 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:03:38 crc kubenswrapper[4854]: E1125 10:03:38.016859 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.268541 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-f5c9d8946-qfdms" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.403824 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jh5cf\" (UniqueName: \"kubernetes.io/projected/15d8094a-7d5b-4f52-8ef0-388820ead440-kube-api-access-jh5cf\") pod \"15d8094a-7d5b-4f52-8ef0-388820ead440\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.403933 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-combined-ca-bundle\") pod \"15d8094a-7d5b-4f52-8ef0-388820ead440\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.404069 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data-custom\") pod \"15d8094a-7d5b-4f52-8ef0-388820ead440\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.404112 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data\") pod \"15d8094a-7d5b-4f52-8ef0-388820ead440\" (UID: \"15d8094a-7d5b-4f52-8ef0-388820ead440\") " Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.409777 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15d8094a-7d5b-4f52-8ef0-388820ead440-kube-api-access-jh5cf" (OuterVolumeSpecName: "kube-api-access-jh5cf") pod "15d8094a-7d5b-4f52-8ef0-388820ead440" (UID: "15d8094a-7d5b-4f52-8ef0-388820ead440"). InnerVolumeSpecName "kube-api-access-jh5cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.410326 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "15d8094a-7d5b-4f52-8ef0-388820ead440" (UID: "15d8094a-7d5b-4f52-8ef0-388820ead440"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.440744 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15d8094a-7d5b-4f52-8ef0-388820ead440" (UID: "15d8094a-7d5b-4f52-8ef0-388820ead440"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.478908 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data" (OuterVolumeSpecName: "config-data") pod "15d8094a-7d5b-4f52-8ef0-388820ead440" (UID: "15d8094a-7d5b-4f52-8ef0-388820ead440"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.507066 4854 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.507226 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.507291 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jh5cf\" (UniqueName: \"kubernetes.io/projected/15d8094a-7d5b-4f52-8ef0-388820ead440-kube-api-access-jh5cf\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.507350 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15d8094a-7d5b-4f52-8ef0-388820ead440-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.717875 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-f5c9d8946-qfdms" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.717924 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-f5c9d8946-qfdms" event={"ID":"15d8094a-7d5b-4f52-8ef0-388820ead440","Type":"ContainerDied","Data":"7e80a8144609011e5483943f3f6f2c54b90ef0aa8c5a8ec48fee6e8060247b7d"} Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.717986 4854 scope.go:117] "RemoveContainer" containerID="73ec5619bc7ec39b579dad55992bd7d6a28694fb9e422d3e23d8473b47a0f2f9" Nov 25 10:03:38 crc kubenswrapper[4854]: E1125 10:03:38.719744 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested\\\"\"" pod="openstack/aodh-db-sync-m27ln" podUID="8be70514-ed7b-499b-acb7-c973bd2590c2" Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.768267 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-f5c9d8946-qfdms"] Nov 25 10:03:38 crc kubenswrapper[4854]: I1125 10:03:38.781657 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-f5c9d8946-qfdms"] Nov 25 10:03:39 crc kubenswrapper[4854]: I1125 10:03:39.026404 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15d8094a-7d5b-4f52-8ef0-388820ead440" path="/var/lib/kubelet/pods/15d8094a-7d5b-4f52-8ef0-388820ead440/volumes" Nov 25 10:03:40 crc kubenswrapper[4854]: I1125 10:03:40.816086 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:03:41 crc kubenswrapper[4854]: I1125 10:03:41.584832 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/rabbitmq-server-2" Nov 25 10:03:41 crc kubenswrapper[4854]: I1125 10:03:41.639065 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Nov 25 10:03:45 crc kubenswrapper[4854]: I1125 10:03:45.934867 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-1" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerName="rabbitmq" containerID="cri-o://b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b" gracePeriod=604796 Nov 25 10:03:49 crc kubenswrapper[4854]: I1125 10:03:49.014444 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:03:49 crc kubenswrapper[4854]: E1125 10:03:49.015494 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:03:49 crc kubenswrapper[4854]: I1125 10:03:49.450366 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:03:49 crc kubenswrapper[4854]: I1125 10:03:49.850215 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" event={"ID":"10f4bf87-5b7e-4077-8d81-13f86562549e","Type":"ContainerStarted","Data":"193775105dfa799ae721ebe65ef37b764f581c5881de4e5dd39f8304977d85df"} Nov 25 10:03:49 crc kubenswrapper[4854]: I1125 10:03:49.876935 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" podStartSLOduration=2.471739744 podStartE2EDuration="34.876912578s" podCreationTimestamp="2025-11-25 10:03:15 +0000 UTC" firstStartedPulling="2025-11-25 10:03:17.041380653 +0000 UTC m=+1602.894374029" lastFinishedPulling="2025-11-25 10:03:49.446553487 +0000 UTC m=+1635.299546863" observedRunningTime="2025-11-25 10:03:49.864422704 +0000 UTC m=+1635.717416090" watchObservedRunningTime="2025-11-25 10:03:49.876912578 +0000 UTC m=+1635.729905954" Nov 25 10:03:50 crc kubenswrapper[4854]: I1125 10:03:50.235108 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:03:50 crc kubenswrapper[4854]: I1125 10:03:50.863094 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-m27ln" event={"ID":"8be70514-ed7b-499b-acb7-c973bd2590c2","Type":"ContainerStarted","Data":"69a52e14d54bdf765bafe9eea6ac6093d5656f2f0ca9ac75d706f8305de1b6cc"} Nov 25 10:03:50 crc kubenswrapper[4854]: I1125 10:03:50.885972 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-m27ln" podStartSLOduration=2.980496986 podStartE2EDuration="30.885945522s" podCreationTimestamp="2025-11-25 10:03:20 +0000 UTC" firstStartedPulling="2025-11-25 10:03:22.327170481 +0000 UTC m=+1608.180163857" lastFinishedPulling="2025-11-25 10:03:50.232619017 +0000 UTC m=+1636.085612393" observedRunningTime="2025-11-25 10:03:50.879554016 +0000 UTC m=+1636.732547392" watchObservedRunningTime="2025-11-25 10:03:50.885945522 +0000 UTC m=+1636.738938898" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.029410 4854 scope.go:117] "RemoveContainer" 
containerID="8a2ba695aa65f5595c082832553b14e212611b74d1bd8c5370c264d95f5a39db" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.060606 4854 scope.go:117] "RemoveContainer" containerID="fe24d6444f38d01e85737fd96dfd356629d78a085cd6fce42d85db11a1f92bbe" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.616094 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.706810 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-erlang-cookie\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707184 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-plugins\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707281 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c24229dd-3c9c-47b6-8080-a1d51e0e6868-pod-info\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707377 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-plugins-conf\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707426 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c24229dd-3c9c-47b6-8080-a1d51e0e6868-erlang-cookie-secret\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707458 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-tls\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707551 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707572 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-server-conf\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707595 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgg4h\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-kube-api-access-bgg4h\") pod 
\"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707646 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-confd\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.707701 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-config-data\") pod \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\" (UID: \"c24229dd-3c9c-47b6-8080-a1d51e0e6868\") " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.710230 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.711175 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.711945 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.715964 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c24229dd-3c9c-47b6-8080-a1d51e0e6868-pod-info" (OuterVolumeSpecName: "pod-info") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.718585 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-kube-api-access-bgg4h" (OuterVolumeSpecName: "kube-api-access-bgg4h") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "kube-api-access-bgg4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.719012 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.719384 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c24229dd-3c9c-47b6-8080-a1d51e0e6868-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.724136 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.778923 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-config-data" (OuterVolumeSpecName: "config-data") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810429 4854 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810460 4854 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c24229dd-3c9c-47b6-8080-a1d51e0e6868-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810469 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810491 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810500 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgg4h\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-kube-api-access-bgg4h\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810514 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810526 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.810537 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc 
kubenswrapper[4854]: I1125 10:03:52.810549 4854 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c24229dd-3c9c-47b6-8080-a1d51e0e6868-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.814364 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-server-conf" (OuterVolumeSpecName: "server-conf") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.844074 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.874172 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c24229dd-3c9c-47b6-8080-a1d51e0e6868" (UID: "c24229dd-3c9c-47b6-8080-a1d51e0e6868"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.889767 4854 generic.go:334] "Generic (PLEG): container finished" podID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerID="b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b" exitCode=0 Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.889824 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"c24229dd-3c9c-47b6-8080-a1d51e0e6868","Type":"ContainerDied","Data":"b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b"} Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.889856 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.889867 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"c24229dd-3c9c-47b6-8080-a1d51e0e6868","Type":"ContainerDied","Data":"8cee4606fda48fe9bc42eeeb24883c30e5d6d60a1f564fc6a2aece994a2bd598"} Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.889888 4854 scope.go:117] "RemoveContainer" containerID="b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.921430 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c24229dd-3c9c-47b6-8080-a1d51e0e6868-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.921475 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.921491 4854 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c24229dd-3c9c-47b6-8080-a1d51e0e6868-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.930847 4854 scope.go:117] "RemoveContainer" containerID="18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.936193 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.958682 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-1"] Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.962852 4854 scope.go:117] "RemoveContainer" containerID="b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b" Nov 25 10:03:52 crc kubenswrapper[4854]: E1125 10:03:52.967337 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b\": container with ID starting with b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b not found: ID does not exist" containerID="b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.967393 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b"} err="failed to get container status \"b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b\": rpc error: code = NotFound desc = could not find container \"b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b\": container with ID starting with b1ab910acfdf23678ef8580d24c8bdfd98cbcbb3917348cb8b0ddf0c2730553b not found: ID does not exist" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.967420 4854 scope.go:117] "RemoveContainer" containerID="18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e" Nov 25 10:03:52 crc kubenswrapper[4854]: E1125 10:03:52.968001 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e\": container with ID starting with 
18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e not found: ID does not exist" containerID="18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.968124 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e"} err="failed to get container status \"18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e\": rpc error: code = NotFound desc = could not find container \"18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e\": container with ID starting with 18a3e87a49d1ced9b4d2c37ea39a103b0fcdb27717118ecf2741f03bc71c3c0e not found: ID does not exist" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.975712 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Nov 25 10:03:52 crc kubenswrapper[4854]: E1125 10:03:52.976298 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerName="rabbitmq" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.976390 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerName="rabbitmq" Nov 25 10:03:52 crc kubenswrapper[4854]: E1125 10:03:52.976458 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerName="setup-container" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.976518 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerName="setup-container" Nov 25 10:03:52 crc kubenswrapper[4854]: E1125 10:03:52.976617 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15d8094a-7d5b-4f52-8ef0-388820ead440" containerName="heat-engine" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.976689 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="15d8094a-7d5b-4f52-8ef0-388820ead440" containerName="heat-engine" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.976979 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="15d8094a-7d5b-4f52-8ef0-388820ead440" containerName="heat-engine" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.977076 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" containerName="rabbitmq" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.978623 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Nov 25 10:03:52 crc kubenswrapper[4854]: I1125 10:03:52.987771 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.051232 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c24229dd-3c9c-47b6-8080-a1d51e0e6868" path="/var/lib/kubelet/pods/c24229dd-3c9c-47b6-8080-a1d51e0e6868/volumes" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.136088 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.136415 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.136585 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-pod-info\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.136814 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.136851 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.137012 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.137044 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-server-conf\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.137254 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xt57h\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-kube-api-access-xt57h\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc 
kubenswrapper[4854]: I1125 10:03:53.137303 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.137348 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.137436 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-config-data\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239254 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239311 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-config-data\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239348 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239441 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239476 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-pod-info\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239535 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239558 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239588 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239603 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-server-conf\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239647 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xt57h\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-kube-api-access-xt57h\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.239685 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.240253 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-config-data\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.240511 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.243332 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.243786 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.243948 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-pod-info\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.244735 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-server-conf\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.244842 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.248587 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.249042 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.269134 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xt57h\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-kube-api-access-xt57h\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.291169 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.365091 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-1\" (UID: \"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e\") " pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.629697 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.910317 4854 generic.go:334] "Generic (PLEG): container finished" podID="8be70514-ed7b-499b-acb7-c973bd2590c2" containerID="69a52e14d54bdf765bafe9eea6ac6093d5656f2f0ca9ac75d706f8305de1b6cc" exitCode=0 Nov 25 10:03:53 crc kubenswrapper[4854]: I1125 10:03:53.910406 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-m27ln" event={"ID":"8be70514-ed7b-499b-acb7-c973bd2590c2","Type":"ContainerDied","Data":"69a52e14d54bdf765bafe9eea6ac6093d5656f2f0ca9ac75d706f8305de1b6cc"} Nov 25 10:03:54 crc kubenswrapper[4854]: W1125 10:03:54.204965 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1fe6c6c2_abbf_4014_a82d_4103f3fc9e3e.slice/crio-41ee024f44c092c89077094db3d863908954ef76ac9ba19bead3e27ec8b2a31f WatchSource:0}: Error finding container 41ee024f44c092c89077094db3d863908954ef76ac9ba19bead3e27ec8b2a31f: Status 404 returned error can't find the container with id 41ee024f44c092c89077094db3d863908954ef76ac9ba19bead3e27ec8b2a31f Nov 25 10:03:54 crc kubenswrapper[4854]: I1125 10:03:54.206197 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Nov 25 10:03:54 crc kubenswrapper[4854]: I1125 10:03:54.923629 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e","Type":"ContainerStarted","Data":"41ee024f44c092c89077094db3d863908954ef76ac9ba19bead3e27ec8b2a31f"} Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.633225 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.705921 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-scripts\") pod \"8be70514-ed7b-499b-acb7-c973bd2590c2\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.707174 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-config-data\") pod \"8be70514-ed7b-499b-acb7-c973bd2590c2\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.707550 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-combined-ca-bundle\") pod \"8be70514-ed7b-499b-acb7-c973bd2590c2\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.707845 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfxww\" (UniqueName: \"kubernetes.io/projected/8be70514-ed7b-499b-acb7-c973bd2590c2-kube-api-access-pfxww\") pod \"8be70514-ed7b-499b-acb7-c973bd2590c2\" (UID: \"8be70514-ed7b-499b-acb7-c973bd2590c2\") " Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.713560 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-scripts" (OuterVolumeSpecName: "scripts") pod "8be70514-ed7b-499b-acb7-c973bd2590c2" (UID: "8be70514-ed7b-499b-acb7-c973bd2590c2"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.714656 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8be70514-ed7b-499b-acb7-c973bd2590c2-kube-api-access-pfxww" (OuterVolumeSpecName: "kube-api-access-pfxww") pod "8be70514-ed7b-499b-acb7-c973bd2590c2" (UID: "8be70514-ed7b-499b-acb7-c973bd2590c2"). InnerVolumeSpecName "kube-api-access-pfxww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.755565 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8be70514-ed7b-499b-acb7-c973bd2590c2" (UID: "8be70514-ed7b-499b-acb7-c973bd2590c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.762487 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-config-data" (OuterVolumeSpecName: "config-data") pod "8be70514-ed7b-499b-acb7-c973bd2590c2" (UID: "8be70514-ed7b-499b-acb7-c973bd2590c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.811396 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.811437 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfxww\" (UniqueName: \"kubernetes.io/projected/8be70514-ed7b-499b-acb7-c973bd2590c2-kube-api-access-pfxww\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.811450 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.811485 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be70514-ed7b-499b-acb7-c973bd2590c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.934807 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-m27ln" event={"ID":"8be70514-ed7b-499b-acb7-c973bd2590c2","Type":"ContainerDied","Data":"46511c88abe06384de2ef27f2bb207b4ee63ec6aca40b9136ee7de5e285b0fc3"} Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.934855 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46511c88abe06384de2ef27f2bb207b4ee63ec6aca40b9136ee7de5e285b0fc3" Nov 25 10:03:55 crc kubenswrapper[4854]: I1125 10:03:55.934857 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-m27ln" Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.110412 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.110704 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-api" containerID="cri-o://0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968" gracePeriod=30 Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.110759 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-listener" containerID="cri-o://c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea" gracePeriod=30 Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.110782 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-notifier" containerID="cri-o://0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e" gracePeriod=30 Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.110876 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-evaluator" containerID="cri-o://1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b" gracePeriod=30 Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.948364 4854 generic.go:334] "Generic (PLEG): container finished" podID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerID="1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b" exitCode=0 Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.948400 4854 generic.go:334] "Generic (PLEG): container finished" podID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerID="0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968" exitCode=0 Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.948439 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerDied","Data":"1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b"} Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.948468 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerDied","Data":"0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968"} Nov 25 10:03:56 crc kubenswrapper[4854]: I1125 10:03:56.950816 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e","Type":"ContainerStarted","Data":"e33d9bb4eec5d36a4ba737c5894e29bc900a1eea367606ce635522d56049ca5a"} Nov 25 10:03:58 crc kubenswrapper[4854]: I1125 10:03:58.981133 4854 generic.go:334] "Generic (PLEG): container finished" podID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerID="c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea" exitCode=0 Nov 25 10:03:58 crc kubenswrapper[4854]: I1125 10:03:58.981233 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerDied","Data":"c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea"} Nov 25 10:04:03 crc kubenswrapper[4854]: I1125 
10:04:03.013809 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:04:03 crc kubenswrapper[4854]: E1125 10:04:03.014691 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:04:03 crc kubenswrapper[4854]: I1125 10:04:03.031840 4854 generic.go:334] "Generic (PLEG): container finished" podID="10f4bf87-5b7e-4077-8d81-13f86562549e" containerID="193775105dfa799ae721ebe65ef37b764f581c5881de4e5dd39f8304977d85df" exitCode=0 Nov 25 10:04:03 crc kubenswrapper[4854]: I1125 10:04:03.031880 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" event={"ID":"10f4bf87-5b7e-4077-8d81-13f86562549e","Type":"ContainerDied","Data":"193775105dfa799ae721ebe65ef37b764f581c5881de4e5dd39f8304977d85df"} Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.033334 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.058940 4854 generic.go:334] "Generic (PLEG): container finished" podID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerID="0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e" exitCode=0 Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.059018 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerDied","Data":"0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e"} Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.059046 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.076062 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"d92bb92b-df2f-4cf2-88c0-fe50081f16de","Type":"ContainerDied","Data":"45dddcce40d9345b5807de49ff2b5c9e869805cc2115b4890f38ffe44da32626"} Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.076229 4854 scope.go:117] "RemoveContainer" containerID="c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.123395 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-public-tls-certs\") pod \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.123821 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-internal-tls-certs\") pod \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.123874 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-config-data\") pod \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.123955 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-scripts\") pod \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.123982 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-combined-ca-bundle\") pod \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.124087 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hf5fj\" (UniqueName: \"kubernetes.io/projected/d92bb92b-df2f-4cf2-88c0-fe50081f16de-kube-api-access-hf5fj\") pod \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\" (UID: \"d92bb92b-df2f-4cf2-88c0-fe50081f16de\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.130957 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-scripts" (OuterVolumeSpecName: "scripts") pod "d92bb92b-df2f-4cf2-88c0-fe50081f16de" (UID: "d92bb92b-df2f-4cf2-88c0-fe50081f16de"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.142001 4854 scope.go:117] "RemoveContainer" containerID="0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.150974 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d92bb92b-df2f-4cf2-88c0-fe50081f16de-kube-api-access-hf5fj" (OuterVolumeSpecName: "kube-api-access-hf5fj") pod "d92bb92b-df2f-4cf2-88c0-fe50081f16de" (UID: "d92bb92b-df2f-4cf2-88c0-fe50081f16de"). InnerVolumeSpecName "kube-api-access-hf5fj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.227091 4854 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.227121 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hf5fj\" (UniqueName: \"kubernetes.io/projected/d92bb92b-df2f-4cf2-88c0-fe50081f16de-kube-api-access-hf5fj\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.278663 4854 scope.go:117] "RemoveContainer" containerID="1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.306260 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d92bb92b-df2f-4cf2-88c0-fe50081f16de" (UID: "d92bb92b-df2f-4cf2-88c0-fe50081f16de"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.311404 4854 scope.go:117] "RemoveContainer" containerID="0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.311617 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d92bb92b-df2f-4cf2-88c0-fe50081f16de" (UID: "d92bb92b-df2f-4cf2-88c0-fe50081f16de"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.320326 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-config-data" (OuterVolumeSpecName: "config-data") pod "d92bb92b-df2f-4cf2-88c0-fe50081f16de" (UID: "d92bb92b-df2f-4cf2-88c0-fe50081f16de"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.330024 4854 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.330084 4854 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.330098 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.343994 4854 scope.go:117] "RemoveContainer" containerID="c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.344533 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea\": container with ID starting with c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea not found: ID does not exist" containerID="c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.344582 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea"} err="failed to get container status \"c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea\": rpc error: code = NotFound desc = could not find container \"c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea\": container with ID starting with c9e498c4bfb41047ac38d60e2d074a3368002a4bfeb64f641fe152ce27c220ea not found: ID does not exist" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.344610 4854 scope.go:117] "RemoveContainer" containerID="0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.344996 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e\": container with ID starting with 0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e not found: ID does not exist" containerID="0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.345074 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e"} err="failed to get container status \"0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e\": rpc error: code = NotFound desc = could not find container \"0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e\": container with ID starting with 0b9c5f8e77ea994bde66ef17be03991b2956a962da365e3c1d94679f80f4f14e not found: ID does not exist" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.345096 4854 scope.go:117] "RemoveContainer" containerID="1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 
10:04:04.345421 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b\": container with ID starting with 1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b not found: ID does not exist" containerID="1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.345466 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b"} err="failed to get container status \"1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b\": rpc error: code = NotFound desc = could not find container \"1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b\": container with ID starting with 1450678da53a375fa08cc2b99d273ab199a7f517ef32a85340e23d71df01407b not found: ID does not exist" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.345491 4854 scope.go:117] "RemoveContainer" containerID="0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.345853 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968\": container with ID starting with 0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968 not found: ID does not exist" containerID="0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.345872 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968"} err="failed to get container status \"0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968\": rpc error: code = NotFound desc = could not find container \"0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968\": container with ID starting with 0dfa1885706bc95273f5a536117653ae1807edacd4c2ca9f6b6ee56ea1c80968 not found: ID does not exist" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.353938 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d92bb92b-df2f-4cf2-88c0-fe50081f16de" (UID: "d92bb92b-df2f-4cf2-88c0-fe50081f16de"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.436462 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d92bb92b-df2f-4cf2-88c0-fe50081f16de-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.454869 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.534834 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.561469 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.562420 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-notifier" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562444 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-notifier" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.562476 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-api" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562484 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-api" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.562501 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-listener" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562509 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-listener" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.562522 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be70514-ed7b-499b-acb7-c973bd2590c2" containerName="aodh-db-sync" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562529 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be70514-ed7b-499b-acb7-c973bd2590c2" containerName="aodh-db-sync" Nov 25 10:04:04 crc kubenswrapper[4854]: E1125 10:04:04.562541 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-evaluator" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562546 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-evaluator" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562844 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-notifier" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562872 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-evaluator" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562884 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="8be70514-ed7b-499b-acb7-c973bd2590c2" containerName="aodh-db-sync" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.562898 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-api" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 
10:04:04.562915 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" containerName="aodh-listener" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.574914 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.580213 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.580564 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-jzshp" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.580740 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.580929 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.581163 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.586249 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.650574 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpzvj\" (UniqueName: \"kubernetes.io/projected/93de10f0-290f-47f5-bb09-214e32227b84-kube-api-access-qpzvj\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.650751 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-config-data\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.650774 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-combined-ca-bundle\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.650831 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-internal-tls-certs\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.650884 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-scripts\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.650997 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-public-tls-certs\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.729116 4854 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.753041 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-scripts\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.753181 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-public-tls-certs\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.753233 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpzvj\" (UniqueName: \"kubernetes.io/projected/93de10f0-290f-47f5-bb09-214e32227b84-kube-api-access-qpzvj\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.753345 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-config-data\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.753366 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-combined-ca-bundle\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.753419 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-internal-tls-certs\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.759285 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-internal-tls-certs\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.763904 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-combined-ca-bundle\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.770809 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-config-data\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.772237 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpzvj\" (UniqueName: \"kubernetes.io/projected/93de10f0-290f-47f5-bb09-214e32227b84-kube-api-access-qpzvj\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " 
pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.802431 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-scripts\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.804212 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/93de10f0-290f-47f5-bb09-214e32227b84-public-tls-certs\") pod \"aodh-0\" (UID: \"93de10f0-290f-47f5-bb09-214e32227b84\") " pod="openstack/aodh-0" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.855976 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-repo-setup-combined-ca-bundle\") pod \"10f4bf87-5b7e-4077-8d81-13f86562549e\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.856220 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-ssh-key\") pod \"10f4bf87-5b7e-4077-8d81-13f86562549e\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.856369 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fvgf\" (UniqueName: \"kubernetes.io/projected/10f4bf87-5b7e-4077-8d81-13f86562549e-kube-api-access-9fvgf\") pod \"10f4bf87-5b7e-4077-8d81-13f86562549e\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.856414 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-inventory\") pod \"10f4bf87-5b7e-4077-8d81-13f86562549e\" (UID: \"10f4bf87-5b7e-4077-8d81-13f86562549e\") " Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.863596 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10f4bf87-5b7e-4077-8d81-13f86562549e-kube-api-access-9fvgf" (OuterVolumeSpecName: "kube-api-access-9fvgf") pod "10f4bf87-5b7e-4077-8d81-13f86562549e" (UID: "10f4bf87-5b7e-4077-8d81-13f86562549e"). InnerVolumeSpecName "kube-api-access-9fvgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.864896 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "10f4bf87-5b7e-4077-8d81-13f86562549e" (UID: "10f4bf87-5b7e-4077-8d81-13f86562549e"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.898286 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-inventory" (OuterVolumeSpecName: "inventory") pod "10f4bf87-5b7e-4077-8d81-13f86562549e" (UID: "10f4bf87-5b7e-4077-8d81-13f86562549e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.902903 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "10f4bf87-5b7e-4077-8d81-13f86562549e" (UID: "10f4bf87-5b7e-4077-8d81-13f86562549e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.960193 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.960227 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fvgf\" (UniqueName: \"kubernetes.io/projected/10f4bf87-5b7e-4077-8d81-13f86562549e-kube-api-access-9fvgf\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.960238 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:04 crc kubenswrapper[4854]: I1125 10:04:04.960246 4854 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f4bf87-5b7e-4077-8d81-13f86562549e-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.026515 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d92bb92b-df2f-4cf2-88c0-fe50081f16de" path="/var/lib/kubelet/pods/d92bb92b-df2f-4cf2-88c0-fe50081f16de/volumes" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.028798 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.090460 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" event={"ID":"10f4bf87-5b7e-4077-8d81-13f86562549e","Type":"ContainerDied","Data":"8ceb515b5c623462d87ef0fd6109cef2146a2fd4f6bc3300659a2f6745de6dbb"} Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.090504 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ceb515b5c623462d87ef0fd6109cef2146a2fd4f6bc3300659a2f6745de6dbb" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.091793 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.158424 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk"] Nov 25 10:04:05 crc kubenswrapper[4854]: E1125 10:04:05.158992 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10f4bf87-5b7e-4077-8d81-13f86562549e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.159005 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="10f4bf87-5b7e-4077-8d81-13f86562549e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.159253 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="10f4bf87-5b7e-4077-8d81-13f86562549e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.160090 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.163163 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.163544 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.164097 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.169233 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.176240 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk"] Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.269327 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.269400 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtd9p\" (UniqueName: \"kubernetes.io/projected/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-kube-api-access-dtd9p\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.270014 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.372703 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.372766 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtd9p\" (UniqueName: \"kubernetes.io/projected/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-kube-api-access-dtd9p\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.372973 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.376210 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.376578 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.390562 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtd9p\" (UniqueName: \"kubernetes.io/projected/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-kube-api-access-dtd9p\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-54jhk\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.504850 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:05 crc kubenswrapper[4854]: I1125 10:04:05.534658 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:04:06 crc kubenswrapper[4854]: I1125 10:04:06.058802 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk"] Nov 25 10:04:06 crc kubenswrapper[4854]: W1125 10:04:06.061633 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod261f5f8f_12a4_4ef7_83f6_cc6a1f054279.slice/crio-5323fc84e71f24cabd197cd4cbf795254cae2343d4a48ed256a39e8ca11bbccc WatchSource:0}: Error finding container 5323fc84e71f24cabd197cd4cbf795254cae2343d4a48ed256a39e8ca11bbccc: Status 404 returned error can't find the container with id 5323fc84e71f24cabd197cd4cbf795254cae2343d4a48ed256a39e8ca11bbccc Nov 25 10:04:06 crc kubenswrapper[4854]: I1125 10:04:06.105516 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"93de10f0-290f-47f5-bb09-214e32227b84","Type":"ContainerStarted","Data":"b587db28dbbaf1d8f4353534f81d9119aae8ff28c79c023f741446b2afbeefed"} Nov 25 10:04:06 crc kubenswrapper[4854]: I1125 10:04:06.106904 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"93de10f0-290f-47f5-bb09-214e32227b84","Type":"ContainerStarted","Data":"be07bc2b327b3ffe74722cd52a7fe1d35735bfbce8dee7ea8b5e97d41d9bb613"} Nov 25 10:04:06 crc kubenswrapper[4854]: I1125 10:04:06.107205 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" event={"ID":"261f5f8f-12a4-4ef7-83f6-cc6a1f054279","Type":"ContainerStarted","Data":"5323fc84e71f24cabd197cd4cbf795254cae2343d4a48ed256a39e8ca11bbccc"} Nov 25 10:04:07 crc kubenswrapper[4854]: I1125 10:04:07.118304 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" event={"ID":"261f5f8f-12a4-4ef7-83f6-cc6a1f054279","Type":"ContainerStarted","Data":"c55e86d456eaf2b245c1081abd2163ed87c4ba1de5025b42465c34b2b343567c"} Nov 25 10:04:07 crc kubenswrapper[4854]: I1125 10:04:07.137922 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" podStartSLOduration=1.7241293180000001 podStartE2EDuration="2.137908192s" podCreationTimestamp="2025-11-25 10:04:05 +0000 UTC" firstStartedPulling="2025-11-25 10:04:06.065753335 +0000 UTC m=+1651.918746701" lastFinishedPulling="2025-11-25 10:04:06.479532199 +0000 UTC m=+1652.332525575" observedRunningTime="2025-11-25 10:04:07.136891885 +0000 UTC m=+1652.989885261" watchObservedRunningTime="2025-11-25 10:04:07.137908192 +0000 UTC m=+1652.990901568" Nov 25 10:04:09 crc kubenswrapper[4854]: I1125 10:04:09.151927 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"93de10f0-290f-47f5-bb09-214e32227b84","Type":"ContainerStarted","Data":"3378756b2fc8a070efc67b6aba4fed9d874ff7b2b459e68df208f515f1a11052"} Nov 25 10:04:10 crc kubenswrapper[4854]: I1125 10:04:10.175443 4854 generic.go:334] "Generic (PLEG): container finished" podID="261f5f8f-12a4-4ef7-83f6-cc6a1f054279" containerID="c55e86d456eaf2b245c1081abd2163ed87c4ba1de5025b42465c34b2b343567c" exitCode=0 Nov 25 10:04:10 crc kubenswrapper[4854]: I1125 10:04:10.175541 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" event={"ID":"261f5f8f-12a4-4ef7-83f6-cc6a1f054279","Type":"ContainerDied","Data":"c55e86d456eaf2b245c1081abd2163ed87c4ba1de5025b42465c34b2b343567c"} Nov 25 10:04:10 crc kubenswrapper[4854]: I1125 10:04:10.181075 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"93de10f0-290f-47f5-bb09-214e32227b84","Type":"ContainerStarted","Data":"ff78695f8c33b85f916a4de56359709f5e27d54fad7a7dea64e2d209b66ed51b"} Nov 25 10:04:11 crc kubenswrapper[4854]: I1125 10:04:11.195096 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"93de10f0-290f-47f5-bb09-214e32227b84","Type":"ContainerStarted","Data":"62583f46ced6a558fc290cfb56206e1d50cae8cd3a9d7928edce853411d52400"} Nov 25 10:04:11 crc kubenswrapper[4854]: I1125 10:04:11.236604 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.422987526 podStartE2EDuration="7.236578818s" podCreationTimestamp="2025-11-25 10:04:04 +0000 UTC" firstStartedPulling="2025-11-25 10:04:05.544310482 +0000 UTC m=+1651.397303858" lastFinishedPulling="2025-11-25 10:04:10.357901774 +0000 UTC m=+1656.210895150" observedRunningTime="2025-11-25 10:04:11.22574825 +0000 UTC m=+1657.078741646" watchObservedRunningTime="2025-11-25 10:04:11.236578818 +0000 UTC m=+1657.089572194" Nov 25 10:04:11 crc kubenswrapper[4854]: I1125 10:04:11.822180 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:11 crc kubenswrapper[4854]: I1125 10:04:11.941612 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-ssh-key\") pod \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " Nov 25 10:04:11 crc kubenswrapper[4854]: I1125 10:04:11.941907 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-inventory\") pod \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " Nov 25 10:04:11 crc kubenswrapper[4854]: I1125 10:04:11.942075 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtd9p\" (UniqueName: \"kubernetes.io/projected/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-kube-api-access-dtd9p\") pod \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\" (UID: \"261f5f8f-12a4-4ef7-83f6-cc6a1f054279\") " Nov 25 10:04:11 crc kubenswrapper[4854]: I1125 10:04:11.946933 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-kube-api-access-dtd9p" (OuterVolumeSpecName: "kube-api-access-dtd9p") pod "261f5f8f-12a4-4ef7-83f6-cc6a1f054279" (UID: "261f5f8f-12a4-4ef7-83f6-cc6a1f054279"). InnerVolumeSpecName "kube-api-access-dtd9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:11.999830 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-inventory" (OuterVolumeSpecName: "inventory") pod "261f5f8f-12a4-4ef7-83f6-cc6a1f054279" (UID: "261f5f8f-12a4-4ef7-83f6-cc6a1f054279"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.034510 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "261f5f8f-12a4-4ef7-83f6-cc6a1f054279" (UID: "261f5f8f-12a4-4ef7-83f6-cc6a1f054279"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.045553 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.045598 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtd9p\" (UniqueName: \"kubernetes.io/projected/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-kube-api-access-dtd9p\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.045612 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/261f5f8f-12a4-4ef7-83f6-cc6a1f054279-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.235944 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.237816 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-54jhk" event={"ID":"261f5f8f-12a4-4ef7-83f6-cc6a1f054279","Type":"ContainerDied","Data":"5323fc84e71f24cabd197cd4cbf795254cae2343d4a48ed256a39e8ca11bbccc"} Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.237863 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5323fc84e71f24cabd197cd4cbf795254cae2343d4a48ed256a39e8ca11bbccc" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.346957 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f"] Nov 25 10:04:12 crc kubenswrapper[4854]: E1125 10:04:12.347772 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="261f5f8f-12a4-4ef7-83f6-cc6a1f054279" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.347867 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="261f5f8f-12a4-4ef7-83f6-cc6a1f054279" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.348282 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="261f5f8f-12a4-4ef7-83f6-cc6a1f054279" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.349325 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.355104 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.355387 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.361888 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.362098 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.399352 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f"] Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.457607 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bgbn\" (UniqueName: \"kubernetes.io/projected/82274506-4003-44e9-86ac-996dfe014de0-kube-api-access-4bgbn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.458001 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.458221 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.458585 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.560531 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bgbn\" (UniqueName: \"kubernetes.io/projected/82274506-4003-44e9-86ac-996dfe014de0-kube-api-access-4bgbn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.560648 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-inventory\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.560776 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.560924 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.564884 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.565171 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.566053 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.578618 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bgbn\" (UniqueName: \"kubernetes.io/projected/82274506-4003-44e9-86ac-996dfe014de0-kube-api-access-4bgbn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:12 crc kubenswrapper[4854]: I1125 10:04:12.692621 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:04:13 crc kubenswrapper[4854]: I1125 10:04:13.264260 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f"] Nov 25 10:04:13 crc kubenswrapper[4854]: W1125 10:04:13.273136 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82274506_4003_44e9_86ac_996dfe014de0.slice/crio-f11dc59b1559a3ed3231d913a960bd5c350be73f1004eda898b202fbafe4876f WatchSource:0}: Error finding container f11dc59b1559a3ed3231d913a960bd5c350be73f1004eda898b202fbafe4876f: Status 404 returned error can't find the container with id f11dc59b1559a3ed3231d913a960bd5c350be73f1004eda898b202fbafe4876f Nov 25 10:04:14 crc kubenswrapper[4854]: I1125 10:04:14.013973 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:04:14 crc kubenswrapper[4854]: E1125 10:04:14.014645 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:04:14 crc kubenswrapper[4854]: I1125 10:04:14.260436 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" event={"ID":"82274506-4003-44e9-86ac-996dfe014de0","Type":"ContainerStarted","Data":"ad5a4e5f655cf522d3545e1cf883ef5fdee4982271d630711457dd4caabc3920"} Nov 25 10:04:14 crc kubenswrapper[4854]: I1125 10:04:14.260732 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" event={"ID":"82274506-4003-44e9-86ac-996dfe014de0","Type":"ContainerStarted","Data":"f11dc59b1559a3ed3231d913a960bd5c350be73f1004eda898b202fbafe4876f"} Nov 25 10:04:14 crc kubenswrapper[4854]: I1125 10:04:14.289399 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" podStartSLOduration=1.836592781 podStartE2EDuration="2.289377897s" podCreationTimestamp="2025-11-25 10:04:12 +0000 UTC" firstStartedPulling="2025-11-25 10:04:13.276476737 +0000 UTC m=+1659.129470113" lastFinishedPulling="2025-11-25 10:04:13.729261853 +0000 UTC m=+1659.582255229" observedRunningTime="2025-11-25 10:04:14.275182107 +0000 UTC m=+1660.128175483" watchObservedRunningTime="2025-11-25 10:04:14.289377897 +0000 UTC m=+1660.142371283" Nov 25 10:04:25 crc kubenswrapper[4854]: I1125 10:04:25.022183 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:04:25 crc kubenswrapper[4854]: E1125 10:04:25.023077 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:04:29 crc kubenswrapper[4854]: I1125 10:04:29.087637 
4854 generic.go:334] "Generic (PLEG): container finished" podID="1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e" containerID="e33d9bb4eec5d36a4ba737c5894e29bc900a1eea367606ce635522d56049ca5a" exitCode=0 Nov 25 10:04:29 crc kubenswrapper[4854]: I1125 10:04:29.087876 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e","Type":"ContainerDied","Data":"e33d9bb4eec5d36a4ba737c5894e29bc900a1eea367606ce635522d56049ca5a"} Nov 25 10:04:30 crc kubenswrapper[4854]: I1125 10:04:30.111970 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e","Type":"ContainerStarted","Data":"5ca4478ed831c1c21b315958542ccd006c41235ef756da6874b9b5419462eda8"} Nov 25 10:04:30 crc kubenswrapper[4854]: I1125 10:04:30.112453 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Nov 25 10:04:30 crc kubenswrapper[4854]: I1125 10:04:30.141266 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=38.14124623 podStartE2EDuration="38.14124623s" podCreationTimestamp="2025-11-25 10:03:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:04:30.131118651 +0000 UTC m=+1675.984112027" watchObservedRunningTime="2025-11-25 10:04:30.14124623 +0000 UTC m=+1675.994239606" Nov 25 10:04:39 crc kubenswrapper[4854]: I1125 10:04:39.014591 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:04:39 crc kubenswrapper[4854]: E1125 10:04:39.015597 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:04:43 crc kubenswrapper[4854]: I1125 10:04:43.631846 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Nov 25 10:04:43 crc kubenswrapper[4854]: I1125 10:04:43.694948 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:48 crc kubenswrapper[4854]: I1125 10:04:48.329230 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="rabbitmq" containerID="cri-o://1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a" gracePeriod=604796 Nov 25 10:04:51 crc kubenswrapper[4854]: I1125 10:04:51.026761 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:04:51 crc kubenswrapper[4854]: E1125 10:04:51.027555 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:04:52 
crc kubenswrapper[4854]: I1125 10:04:52.314102 4854 scope.go:117] "RemoveContainer" containerID="3925d0ecbcf492ecf430f31674a2f8cf6d1429050e7fd4aff7d6dae030077326" Nov 25 10:04:52 crc kubenswrapper[4854]: I1125 10:04:52.355466 4854 scope.go:117] "RemoveContainer" containerID="ce845be113d7c4914879cee65d8279f1fe6e5641babefbbdaa7470c88928078b" Nov 25 10:04:52 crc kubenswrapper[4854]: I1125 10:04:52.393395 4854 scope.go:117] "RemoveContainer" containerID="e95499c001880a2f7439b1ed89af334beb9764b8371e6cc762af35f32f369db2" Nov 25 10:04:52 crc kubenswrapper[4854]: I1125 10:04:52.433309 4854 scope.go:117] "RemoveContainer" containerID="828a51a9b39919a71b5ae3a8bf1007751e179b303d3e3675ecd94d37882a7715" Nov 25 10:04:52 crc kubenswrapper[4854]: I1125 10:04:52.492229 4854 scope.go:117] "RemoveContainer" containerID="f1a6cbcb770c9dcf08497157b59b07b98a4ebfc9e1e40169544fc1aaa7aefeb3" Nov 25 10:04:52 crc kubenswrapper[4854]: I1125 10:04:52.515833 4854 scope.go:117] "RemoveContainer" containerID="1228aff76ce4e075c53c6f5d77ee2da25523328069aa7ab95cf151dc04c7c6f7" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.020754 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087277 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-plugins\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087364 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-confd\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087431 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087538 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-erlang-cookie\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087601 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj7rl\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-kube-api-access-tj7rl\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087640 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6894f0be-f53f-401b-8707-4cc0cfd020dc-pod-info\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087698 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-config-data\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087846 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-plugins-conf\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087921 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-server-conf\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.087946 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-tls\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.088010 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6894f0be-f53f-401b-8707-4cc0cfd020dc-erlang-cookie-secret\") pod \"6894f0be-f53f-401b-8707-4cc0cfd020dc\" (UID: \"6894f0be-f53f-401b-8707-4cc0cfd020dc\") " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.088944 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.090697 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.095792 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.095915 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6894f0be-f53f-401b-8707-4cc0cfd020dc-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.115433 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.116774 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/6894f0be-f53f-401b-8707-4cc0cfd020dc-pod-info" (OuterVolumeSpecName: "pod-info") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.125559 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.125826 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-kube-api-access-tj7rl" (OuterVolumeSpecName: "kube-api-access-tj7rl") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "kube-api-access-tj7rl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.136081 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-config-data" (OuterVolumeSpecName: "config-data") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.174545 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-server-conf" (OuterVolumeSpecName: "server-conf") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190829 4854 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190860 4854 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190869 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190879 4854 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6894f0be-f53f-401b-8707-4cc0cfd020dc-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190888 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190908 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190917 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190928 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj7rl\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-kube-api-access-tj7rl\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190941 4854 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6894f0be-f53f-401b-8707-4cc0cfd020dc-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.190950 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6894f0be-f53f-401b-8707-4cc0cfd020dc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.224342 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.270109 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "6894f0be-f53f-401b-8707-4cc0cfd020dc" (UID: "6894f0be-f53f-401b-8707-4cc0cfd020dc"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.293509 4854 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6894f0be-f53f-401b-8707-4cc0cfd020dc-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.293544 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.493033 4854 generic.go:334] "Generic (PLEG): container finished" podID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerID="1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a" exitCode=0 Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.493094 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6894f0be-f53f-401b-8707-4cc0cfd020dc","Type":"ContainerDied","Data":"1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a"} Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.493125 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6894f0be-f53f-401b-8707-4cc0cfd020dc","Type":"ContainerDied","Data":"0670d50acf9d21a887e1044760c2332b9d3892cf49dd461fc14f468eba91a844"} Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.493166 4854 scope.go:117] "RemoveContainer" containerID="1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.493439 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.561120 4854 scope.go:117] "RemoveContainer" containerID="d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.625775 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.640867 4854 scope.go:117] "RemoveContainer" containerID="1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a" Nov 25 10:04:55 crc kubenswrapper[4854]: E1125 10:04:55.641781 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a\": container with ID starting with 1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a not found: ID does not exist" containerID="1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.641907 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a"} err="failed to get container status \"1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a\": rpc error: code = NotFound desc = could not find container \"1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a\": container with ID starting with 1d6a33eaeeb7669f31369c6579ed254715aa5a4dd07d934b4febc719bea30b1a not found: ID does not exist" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.642037 4854 scope.go:117] "RemoveContainer" containerID="d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec" Nov 25 10:04:55 crc 
kubenswrapper[4854]: E1125 10:04:55.645295 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec\": container with ID starting with d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec not found: ID does not exist" containerID="d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.645348 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec"} err="failed to get container status \"d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec\": rpc error: code = NotFound desc = could not find container \"d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec\": container with ID starting with d885495fc11b7f4d9575608f39ce77e3f0ad78db4459a1f55ec565f35edd06ec not found: ID does not exist" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.653058 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.672733 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:55 crc kubenswrapper[4854]: E1125 10:04:55.673353 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="rabbitmq" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.673365 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="rabbitmq" Nov 25 10:04:55 crc kubenswrapper[4854]: E1125 10:04:55.673396 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="setup-container" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.673404 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="setup-container" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.673691 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="rabbitmq" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.675129 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.692642 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.818862 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.818921 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ff5d28f8-deea-439b-abb6-7882641c046f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819091 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzmvq\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-kube-api-access-lzmvq\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819257 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819388 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819621 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ff5d28f8-deea-439b-abb6-7882641c046f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819720 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819844 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819875 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819899 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.819946 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-config-data\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922305 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922373 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922436 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ff5d28f8-deea-439b-abb6-7882641c046f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922466 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922504 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922520 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922540 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 
10:04:55.922564 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-config-data\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922595 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922618 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ff5d28f8-deea-439b-abb6-7882641c046f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.922715 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzmvq\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-kube-api-access-lzmvq\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.923386 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.924031 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.924194 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.924230 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.924884 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ff5d28f8-deea-439b-abb6-7882641c046f-config-data\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.925030 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") device mount path 
\"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.926115 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ff5d28f8-deea-439b-abb6-7882641c046f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.926787 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.927353 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.934193 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ff5d28f8-deea-439b-abb6-7882641c046f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.941262 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzmvq\" (UniqueName: \"kubernetes.io/projected/ff5d28f8-deea-439b-abb6-7882641c046f-kube-api-access-lzmvq\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:55 crc kubenswrapper[4854]: I1125 10:04:55.968498 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"ff5d28f8-deea-439b-abb6-7882641c046f\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:56 crc kubenswrapper[4854]: I1125 10:04:56.025511 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:04:56 crc kubenswrapper[4854]: I1125 10:04:56.511377 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:57 crc kubenswrapper[4854]: I1125 10:04:57.025763 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" path="/var/lib/kubelet/pods/6894f0be-f53f-401b-8707-4cc0cfd020dc/volumes" Nov 25 10:04:57 crc kubenswrapper[4854]: I1125 10:04:57.518223 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ff5d28f8-deea-439b-abb6-7882641c046f","Type":"ContainerStarted","Data":"2436933a3f812c712967d58829f1b27c2473baeb6c4fa71cd97d341c6737baf5"} Nov 25 10:04:58 crc kubenswrapper[4854]: I1125 10:04:58.530082 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ff5d28f8-deea-439b-abb6-7882641c046f","Type":"ContainerStarted","Data":"f7b70588bec070e41117952d7518ec7b1da7dd209d835001e82945f7232f7bdb"} Nov 25 10:04:59 crc kubenswrapper[4854]: I1125 10:04:59.838971 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="6894f0be-f53f-401b-8707-4cc0cfd020dc" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: i/o timeout" Nov 25 10:05:05 crc kubenswrapper[4854]: I1125 10:05:05.022377 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:05:05 crc kubenswrapper[4854]: E1125 10:05:05.023341 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:05:16 crc kubenswrapper[4854]: I1125 10:05:16.015491 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:05:16 crc kubenswrapper[4854]: E1125 10:05:16.017226 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:05:28 crc kubenswrapper[4854]: I1125 10:05:28.014988 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:05:28 crc kubenswrapper[4854]: E1125 10:05:28.016874 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:05:30 crc kubenswrapper[4854]: I1125 10:05:30.937905 4854 generic.go:334] "Generic (PLEG): container finished" podID="ff5d28f8-deea-439b-abb6-7882641c046f" 
containerID="f7b70588bec070e41117952d7518ec7b1da7dd209d835001e82945f7232f7bdb" exitCode=0 Nov 25 10:05:30 crc kubenswrapper[4854]: I1125 10:05:30.938123 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ff5d28f8-deea-439b-abb6-7882641c046f","Type":"ContainerDied","Data":"f7b70588bec070e41117952d7518ec7b1da7dd209d835001e82945f7232f7bdb"} Nov 25 10:05:31 crc kubenswrapper[4854]: I1125 10:05:31.949905 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ff5d28f8-deea-439b-abb6-7882641c046f","Type":"ContainerStarted","Data":"290c19cb436ffd5ee02dad9ed00eec3b190272f38d10f6be96aa14a4558dfb73"} Nov 25 10:05:31 crc kubenswrapper[4854]: I1125 10:05:31.950906 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 10:05:31 crc kubenswrapper[4854]: I1125 10:05:31.984570 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.984548607 podStartE2EDuration="36.984548607s" podCreationTimestamp="2025-11-25 10:04:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:31.974861271 +0000 UTC m=+1737.827854667" watchObservedRunningTime="2025-11-25 10:05:31.984548607 +0000 UTC m=+1737.837541983" Nov 25 10:05:40 crc kubenswrapper[4854]: I1125 10:05:40.014882 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:05:40 crc kubenswrapper[4854]: E1125 10:05:40.018212 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:05:46 crc kubenswrapper[4854]: I1125 10:05:46.028984 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 10:05:51 crc kubenswrapper[4854]: I1125 10:05:51.013737 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:05:51 crc kubenswrapper[4854]: E1125 10:05:51.015133 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:05:52 crc kubenswrapper[4854]: I1125 10:05:52.681732 4854 scope.go:117] "RemoveContainer" containerID="187e9d86a62fe917a0205f8d650b4d046ab246dc1b2d0cf9a060f1c5e6af8684" Nov 25 10:05:52 crc kubenswrapper[4854]: I1125 10:05:52.713532 4854 scope.go:117] "RemoveContainer" containerID="7c876d7b8becacb2d7cf0f7a7a2fa2e375c90003ca3092d163dae8f1456e2edc" Nov 25 10:05:52 crc kubenswrapper[4854]: I1125 10:05:52.750295 4854 scope.go:117] "RemoveContainer" containerID="e5f32de72db68fb2febcc995379ea1797a1943daefae6f7e6ab7928d519f5f4c" Nov 25 10:05:52 crc kubenswrapper[4854]: I1125 10:05:52.789653 4854 
scope.go:117] "RemoveContainer" containerID="f4bd7dcd6a3a6f0a10783c43cb0c6743e72d0cf19ee892df5226a7b29f6efc1f" Nov 25 10:06:04 crc kubenswrapper[4854]: I1125 10:06:04.014296 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:06:04 crc kubenswrapper[4854]: E1125 10:06:04.015178 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:06:18 crc kubenswrapper[4854]: I1125 10:06:18.013276 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:06:18 crc kubenswrapper[4854]: E1125 10:06:18.014157 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:06:33 crc kubenswrapper[4854]: I1125 10:06:33.013815 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:06:33 crc kubenswrapper[4854]: E1125 10:06:33.014825 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:06:44 crc kubenswrapper[4854]: I1125 10:06:44.014073 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:06:44 crc kubenswrapper[4854]: E1125 10:06:44.014890 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:06:52 crc kubenswrapper[4854]: I1125 10:06:52.964140 4854 scope.go:117] "RemoveContainer" containerID="52d00e92708fd941e776114792bd3513c0b495c32e260958d01d56151406181d" Nov 25 10:06:52 crc kubenswrapper[4854]: I1125 10:06:52.997127 4854 scope.go:117] "RemoveContainer" containerID="078ec9a90435e71ae681447221ff162b9e13be26859acb5b11f00d98694e0dde" Nov 25 10:06:53 crc kubenswrapper[4854]: I1125 10:06:53.079453 4854 scope.go:117] "RemoveContainer" containerID="cb307152b9abbf4f85ace9d8fe1685bbccc335f8630367a22f3cf2b126796348" Nov 25 10:06:53 crc kubenswrapper[4854]: I1125 10:06:53.141275 4854 scope.go:117] "RemoveContainer" containerID="2c232ef0b916716efc7a0cf054575944afee60d4eee54d0ea98362471a6c33ec" Nov 25 10:06:54 crc 
kubenswrapper[4854]: I1125 10:06:54.057945 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-rkthx"] Nov 25 10:06:54 crc kubenswrapper[4854]: I1125 10:06:54.075844 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-a056-account-create-gtc4f"] Nov 25 10:06:54 crc kubenswrapper[4854]: I1125 10:06:54.092610 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-a056-account-create-gtc4f"] Nov 25 10:06:54 crc kubenswrapper[4854]: I1125 10:06:54.105656 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-rkthx"] Nov 25 10:06:55 crc kubenswrapper[4854]: I1125 10:06:55.035944 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76c016ea-a928-4d8a-9936-ca262a75afa4" path="/var/lib/kubelet/pods/76c016ea-a928-4d8a-9936-ca262a75afa4/volumes" Nov 25 10:06:55 crc kubenswrapper[4854]: I1125 10:06:55.041656 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dfd5d17-f81b-40bd-82a8-e52a2c1d8429" path="/var/lib/kubelet/pods/9dfd5d17-f81b-40bd-82a8-e52a2c1d8429/volumes" Nov 25 10:06:55 crc kubenswrapper[4854]: I1125 10:06:55.044609 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-0eb6-account-create-8djfp"] Nov 25 10:06:55 crc kubenswrapper[4854]: I1125 10:06:55.052790 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-0eb6-account-create-8djfp"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.014459 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:06:56 crc kubenswrapper[4854]: E1125 10:06:56.015099 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.073229 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-30f6-account-create-v94c4"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.087796 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-x6mc4"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.098594 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-6zmvt"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.110566 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-30f6-account-create-v94c4"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.126116 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-q8skm"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.146629 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-x6mc4"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.159583 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-76e1-account-create-dv8dm"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.172180 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-6zmvt"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.193032 4854 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-76e1-account-create-dv8dm"] Nov 25 10:06:56 crc kubenswrapper[4854]: I1125 10:06:56.205831 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-q8skm"] Nov 25 10:06:57 crc kubenswrapper[4854]: I1125 10:06:57.032530 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7" path="/var/lib/kubelet/pods/2af4c5bf-31f7-4a80-8ceb-244ede8ba2b7/volumes" Nov 25 10:06:57 crc kubenswrapper[4854]: I1125 10:06:57.036731 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="423749f9-fd4e-4d6c-860e-e5d17269da04" path="/var/lib/kubelet/pods/423749f9-fd4e-4d6c-860e-e5d17269da04/volumes" Nov 25 10:06:57 crc kubenswrapper[4854]: I1125 10:06:57.039386 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="513c3ff8-3514-4d35-bbec-52fa4e2bd363" path="/var/lib/kubelet/pods/513c3ff8-3514-4d35-bbec-52fa4e2bd363/volumes" Nov 25 10:06:57 crc kubenswrapper[4854]: I1125 10:06:57.041522 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e21e324-1ba4-4b7b-b45c-197db1c1e890" path="/var/lib/kubelet/pods/6e21e324-1ba4-4b7b-b45c-197db1c1e890/volumes" Nov 25 10:06:57 crc kubenswrapper[4854]: I1125 10:06:57.043964 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6efa34a3-9747-4cb3-b829-f4d95b402668" path="/var/lib/kubelet/pods/6efa34a3-9747-4cb3-b829-f4d95b402668/volumes" Nov 25 10:06:57 crc kubenswrapper[4854]: I1125 10:06:57.045562 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5c9071a-fe25-4ca2-bee6-38fea725d4d4" path="/var/lib/kubelet/pods/c5c9071a-fe25-4ca2-bee6-38fea725d4d4/volumes" Nov 25 10:07:05 crc kubenswrapper[4854]: I1125 10:07:05.036860 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg"] Nov 25 10:07:05 crc kubenswrapper[4854]: I1125 10:07:05.054243 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-t2zxg"] Nov 25 10:07:05 crc kubenswrapper[4854]: I1125 10:07:05.066298 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-74f0-account-create-9v7gt"] Nov 25 10:07:05 crc kubenswrapper[4854]: I1125 10:07:05.078066 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-74f0-account-create-9v7gt"] Nov 25 10:07:07 crc kubenswrapper[4854]: I1125 10:07:07.035154 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a460ca1-2f26-4dd4-8618-ac0c329b4689" path="/var/lib/kubelet/pods/9a460ca1-2f26-4dd4-8618-ac0c329b4689/volumes" Nov 25 10:07:07 crc kubenswrapper[4854]: I1125 10:07:07.036761 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc" path="/var/lib/kubelet/pods/f7e6380a-24e5-4ed2-aba6-e4b5a0adc0bc/volumes" Nov 25 10:07:08 crc kubenswrapper[4854]: I1125 10:07:08.013723 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:07:08 crc kubenswrapper[4854]: E1125 10:07:08.014181 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:07:20 crc kubenswrapper[4854]: I1125 10:07:20.013938 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:07:20 crc kubenswrapper[4854]: E1125 10:07:20.014849 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:07:30 crc kubenswrapper[4854]: I1125 10:07:30.466411 4854 generic.go:334] "Generic (PLEG): container finished" podID="82274506-4003-44e9-86ac-996dfe014de0" containerID="ad5a4e5f655cf522d3545e1cf883ef5fdee4982271d630711457dd4caabc3920" exitCode=0 Nov 25 10:07:30 crc kubenswrapper[4854]: I1125 10:07:30.466476 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" event={"ID":"82274506-4003-44e9-86ac-996dfe014de0","Type":"ContainerDied","Data":"ad5a4e5f655cf522d3545e1cf883ef5fdee4982271d630711457dd4caabc3920"} Nov 25 10:07:31 crc kubenswrapper[4854]: I1125 10:07:31.949762 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.013663 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:07:32 crc kubenswrapper[4854]: E1125 10:07:32.013958 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.070823 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-ssh-key\") pod \"82274506-4003-44e9-86ac-996dfe014de0\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.070896 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bgbn\" (UniqueName: \"kubernetes.io/projected/82274506-4003-44e9-86ac-996dfe014de0-kube-api-access-4bgbn\") pod \"82274506-4003-44e9-86ac-996dfe014de0\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.070954 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-inventory\") pod \"82274506-4003-44e9-86ac-996dfe014de0\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.071166 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-bootstrap-combined-ca-bundle\") pod \"82274506-4003-44e9-86ac-996dfe014de0\" (UID: \"82274506-4003-44e9-86ac-996dfe014de0\") " Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.077711 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "82274506-4003-44e9-86ac-996dfe014de0" (UID: "82274506-4003-44e9-86ac-996dfe014de0"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.077936 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82274506-4003-44e9-86ac-996dfe014de0-kube-api-access-4bgbn" (OuterVolumeSpecName: "kube-api-access-4bgbn") pod "82274506-4003-44e9-86ac-996dfe014de0" (UID: "82274506-4003-44e9-86ac-996dfe014de0"). InnerVolumeSpecName "kube-api-access-4bgbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.106054 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "82274506-4003-44e9-86ac-996dfe014de0" (UID: "82274506-4003-44e9-86ac-996dfe014de0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.107132 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-inventory" (OuterVolumeSpecName: "inventory") pod "82274506-4003-44e9-86ac-996dfe014de0" (UID: "82274506-4003-44e9-86ac-996dfe014de0"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.174487 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bgbn\" (UniqueName: \"kubernetes.io/projected/82274506-4003-44e9-86ac-996dfe014de0-kube-api-access-4bgbn\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.174532 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.174545 4854 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.174561 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82274506-4003-44e9-86ac-996dfe014de0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.492111 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" event={"ID":"82274506-4003-44e9-86ac-996dfe014de0","Type":"ContainerDied","Data":"f11dc59b1559a3ed3231d913a960bd5c350be73f1004eda898b202fbafe4876f"} Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.492459 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f11dc59b1559a3ed3231d913a960bd5c350be73f1004eda898b202fbafe4876f" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.492180 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.599104 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq"] Nov 25 10:07:32 crc kubenswrapper[4854]: E1125 10:07:32.599783 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82274506-4003-44e9-86ac-996dfe014de0" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.599800 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="82274506-4003-44e9-86ac-996dfe014de0" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.600035 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="82274506-4003-44e9-86ac-996dfe014de0" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.600904 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.609369 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.609449 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.609721 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.609748 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.622022 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq"] Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.686005 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkjnf\" (UniqueName: \"kubernetes.io/projected/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-kube-api-access-lkjnf\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.686073 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.686275 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.787207 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.787276 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkjnf\" (UniqueName: \"kubernetes.io/projected/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-kube-api-access-lkjnf\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.787331 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.801330 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.803009 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.804638 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkjnf\" (UniqueName: \"kubernetes.io/projected/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-kube-api-access-lkjnf\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:32 crc kubenswrapper[4854]: I1125 10:07:32.934582 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:07:33 crc kubenswrapper[4854]: I1125 10:07:33.061194 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-7wk5r"] Nov 25 10:07:33 crc kubenswrapper[4854]: I1125 10:07:33.118275 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-7wk5r"] Nov 25 10:07:33 crc kubenswrapper[4854]: I1125 10:07:33.525859 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq"] Nov 25 10:07:33 crc kubenswrapper[4854]: I1125 10:07:33.530430 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:07:34 crc kubenswrapper[4854]: I1125 10:07:34.516915 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" event={"ID":"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1","Type":"ContainerStarted","Data":"28502c01eb466951abe2985c23741f8c063d2b52a074a0017dd9a793dcd47c7a"} Nov 25 10:07:34 crc kubenswrapper[4854]: I1125 10:07:34.517240 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" event={"ID":"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1","Type":"ContainerStarted","Data":"c647ebb2f91a4dcb262a627613f1cc4556f63478672236d98d27cdea768900f2"} Nov 25 10:07:34 crc kubenswrapper[4854]: I1125 10:07:34.548258 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" podStartSLOduration=2.130263579 podStartE2EDuration="2.548239179s" podCreationTimestamp="2025-11-25 10:07:32 +0000 UTC" firstStartedPulling="2025-11-25 10:07:33.530149376 +0000 UTC m=+1859.383142752" lastFinishedPulling="2025-11-25 10:07:33.948124966 +0000 UTC m=+1859.801118352" 
observedRunningTime="2025-11-25 10:07:34.533946977 +0000 UTC m=+1860.386940383" watchObservedRunningTime="2025-11-25 10:07:34.548239179 +0000 UTC m=+1860.401232555" Nov 25 10:07:35 crc kubenswrapper[4854]: I1125 10:07:35.044799 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd7395cb-72b7-4b86-860c-9ba1b3ccd34e" path="/var/lib/kubelet/pods/bd7395cb-72b7-4b86-860c-9ba1b3ccd34e/volumes" Nov 25 10:07:40 crc kubenswrapper[4854]: I1125 10:07:40.040014 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-hmkhw"] Nov 25 10:07:40 crc kubenswrapper[4854]: I1125 10:07:40.058495 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-hmkhw"] Nov 25 10:07:41 crc kubenswrapper[4854]: I1125 10:07:41.048666 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48cb153a-315a-4e7b-adea-51a576cd48a6" path="/var/lib/kubelet/pods/48cb153a-315a-4e7b-adea-51a576cd48a6/volumes" Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.046295 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-f4l8h"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.059044 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d94f-account-create-md9dh"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.072925 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-m2l2v"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.083133 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-fbd2-account-create-wh7nh"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.093242 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-32fe-account-create-b4mz2"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.106209 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-7v9gw"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.117843 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-m2l2v"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.128740 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d94f-account-create-md9dh"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.141778 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-fbd2-account-create-wh7nh"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.156050 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-7v9gw"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.169863 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-32fe-account-create-b4mz2"] Nov 25 10:07:44 crc kubenswrapper[4854]: I1125 10:07:44.180400 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-f4l8h"] Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.031292 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.031530 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89ab6a6b-99ee-4388-92a6-d1e0bb469abc" path="/var/lib/kubelet/pods/89ab6a6b-99ee-4388-92a6-d1e0bb469abc/volumes" Nov 25 10:07:45 crc kubenswrapper[4854]: E1125 10:07:45.032698 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.036319 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6caae98-1807-47ad-a1b8-ddc1ff33b2d9" path="/var/lib/kubelet/pods/b6caae98-1807-47ad-a1b8-ddc1ff33b2d9/volumes" Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.040259 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360" path="/var/lib/kubelet/pods/ba1f1fb5-ec0c-48be-9fd8-a0fd1a040360/volumes" Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.041968 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf266cb3-661b-4fbd-8162-8da1268957be" path="/var/lib/kubelet/pods/bf266cb3-661b-4fbd-8162-8da1268957be/volumes" Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.043220 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7341835-d9f5-46b4-b382-364fab8df0ac" path="/var/lib/kubelet/pods/c7341835-d9f5-46b4-b382-364fab8df0ac/volumes" Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.043986 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd430211-c49c-40f1-a776-de76234249eb" path="/var/lib/kubelet/pods/dd430211-c49c-40f1-a776-de76234249eb/volumes" Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.045372 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c575-account-create-kpd7g"] Nov 25 10:07:45 crc kubenswrapper[4854]: I1125 10:07:45.055198 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-c575-account-create-kpd7g"] Nov 25 10:07:47 crc kubenswrapper[4854]: I1125 10:07:47.033113 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc" path="/var/lib/kubelet/pods/7f037bba-7ce0-46ba-8fdf-5f3a3d9183dc/volumes" Nov 25 10:07:50 crc kubenswrapper[4854]: I1125 10:07:50.065399 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-4xfns"] Nov 25 10:07:50 crc kubenswrapper[4854]: I1125 10:07:50.078100 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-4xfns"] Nov 25 10:07:51 crc kubenswrapper[4854]: I1125 10:07:51.036085 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a253f619-24d4-453e-815f-c5301c77799c" path="/var/lib/kubelet/pods/a253f619-24d4-453e-815f-c5301c77799c/volumes" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.251941 4854 scope.go:117] "RemoveContainer" containerID="50fa2b39aafe9ed2f2f7e4c5f45556b0fa77418cdb7b4a598605b75e614df720" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.290759 4854 scope.go:117] "RemoveContainer" containerID="34a4f1590eff5ac4b1144b819215d001a64f93e44eed16b531bb27693637c721" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.371110 4854 scope.go:117] "RemoveContainer" containerID="62e81eaac63d5c569bae9bf10530fcb176395c37359dc28f068b0d5893662ef3" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.428978 4854 scope.go:117] "RemoveContainer" containerID="7d0ddd9a2f9ff19c05e9f3a35af705f152f76840629a045c10895eef1fd68684" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.478737 4854 scope.go:117] "RemoveContainer" 
containerID="8331dbd97c5f158d8d4bfa4859d8a4f0e025df23976af7935ba58a65c8fddb53" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.533438 4854 scope.go:117] "RemoveContainer" containerID="8da2179eb6b2be245fef4ebcc09c6df1ec6b558a1898860c982fcbef041f73cd" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.605423 4854 scope.go:117] "RemoveContainer" containerID="97d599ea366a8550312d92c5eea199fdc8b2b4d99d9cbfe4b539e537f84f65f3" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.630608 4854 scope.go:117] "RemoveContainer" containerID="ff773b5022ada182235f3b5c352a4451615969efc51224aa361b23293584e973" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.670297 4854 scope.go:117] "RemoveContainer" containerID="50e9a62c0abbe5c38a739be7b8cf7f8e187369ad98c2621efe205c6bf417c488" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.694139 4854 scope.go:117] "RemoveContainer" containerID="6a5eac02fed633d574667927717afeda3d68015787b61fa294d5e9ca90f59202" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.719146 4854 scope.go:117] "RemoveContainer" containerID="6847d5629dfe97b64f42dec56e4ee75497bdc8207fdcd3979b11bbcdce32c140" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.752345 4854 scope.go:117] "RemoveContainer" containerID="6bc97d0196bd758efdf5f832f16030608ad6c721d833a8496314b304a93ee311" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.779266 4854 scope.go:117] "RemoveContainer" containerID="0491b636aa9ffb5c6cb3b6acc5f0a46ffd190ab374a3b71623af60793f016a8f" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.800331 4854 scope.go:117] "RemoveContainer" containerID="baff4ed1020036b4da8754ee2e1ab5e73579e8d40fcdca9d941d9806c183fefb" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.833650 4854 scope.go:117] "RemoveContainer" containerID="1fc7dc122c45fb487247878641dc22a52eb26553b44ba9c5877212dd062970dc" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.856866 4854 scope.go:117] "RemoveContainer" containerID="17c1d0e49e0440a3b70852177d5cad6b9752c11c9cd22892488e81922e5f82e9" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.879629 4854 scope.go:117] "RemoveContainer" containerID="4a041cdb915f0473f0e051a9de5d7ed6834c52efbf7e77de3a0c70381628bbb0" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.913755 4854 scope.go:117] "RemoveContainer" containerID="e95af3f44186f5bed8bf345c0f41ab3788ca2cc184c1c349f1f45e4f9523409d" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.944565 4854 scope.go:117] "RemoveContainer" containerID="86aee007c08258f16d7873af7f2909e890cd1f2fa8bc7a60f57eea84c1b7ff46" Nov 25 10:07:53 crc kubenswrapper[4854]: I1125 10:07:53.967765 4854 scope.go:117] "RemoveContainer" containerID="332e57882fa8752c800c3eefd29bd426a505503148fb91314bcf43a6d4e8ac07" Nov 25 10:07:57 crc kubenswrapper[4854]: I1125 10:07:57.014756 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:07:57 crc kubenswrapper[4854]: I1125 10:07:57.801322 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"c0e53261c1b00dc00042ef24d7f5409ac882efa92a250513a37cff1d9e331ec1"} Nov 25 10:08:32 crc kubenswrapper[4854]: I1125 10:08:32.056454 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-5n9gc"] Nov 25 10:08:32 crc kubenswrapper[4854]: I1125 10:08:32.072786 4854 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/placement-db-sync-5n9gc"] Nov 25 10:08:33 crc kubenswrapper[4854]: I1125 10:08:33.024925 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33e7955b-280b-4907-a8ee-7fb1a46d6352" path="/var/lib/kubelet/pods/33e7955b-280b-4907-a8ee-7fb1a46d6352/volumes" Nov 25 10:08:35 crc kubenswrapper[4854]: I1125 10:08:35.056535 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-h58qt"] Nov 25 10:08:35 crc kubenswrapper[4854]: I1125 10:08:35.074243 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-h58qt"] Nov 25 10:08:37 crc kubenswrapper[4854]: I1125 10:08:37.041132 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98" path="/var/lib/kubelet/pods/3bd0c35d-5f7c-47aa-bdf3-5d5e7ff61c98/volumes" Nov 25 10:08:37 crc kubenswrapper[4854]: I1125 10:08:37.042345 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-hkj5t"] Nov 25 10:08:37 crc kubenswrapper[4854]: I1125 10:08:37.045416 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-hkj5t"] Nov 25 10:08:39 crc kubenswrapper[4854]: I1125 10:08:39.026688 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70897159-9d6f-44cc-9b46-5f6a5d18fd8b" path="/var/lib/kubelet/pods/70897159-9d6f-44cc-9b46-5f6a5d18fd8b/volumes" Nov 25 10:08:54 crc kubenswrapper[4854]: I1125 10:08:54.419490 4854 scope.go:117] "RemoveContainer" containerID="10f7ec26259addf257420fc5f9b059d31430876f4caed02a03148e2aa636e30a" Nov 25 10:08:54 crc kubenswrapper[4854]: I1125 10:08:54.474511 4854 scope.go:117] "RemoveContainer" containerID="089d2a430f10afef4cfd498ffb2b28f64c94c7dfd45e7109263a3e93173a32a3" Nov 25 10:08:54 crc kubenswrapper[4854]: I1125 10:08:54.532351 4854 scope.go:117] "RemoveContainer" containerID="e644f8b37a3dd8708617101f297f3fb6fe3734660cd125b9a812de9c4e28cf78" Nov 25 10:08:55 crc kubenswrapper[4854]: I1125 10:08:55.045510 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-5h66s"] Nov 25 10:08:55 crc kubenswrapper[4854]: I1125 10:08:55.056821 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-5h66s"] Nov 25 10:08:57 crc kubenswrapper[4854]: I1125 10:08:57.030563 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="816b6c7b-9d88-412e-8e20-5630cc8fd4a9" path="/var/lib/kubelet/pods/816b6c7b-9d88-412e-8e20-5630cc8fd4a9/volumes" Nov 25 10:09:01 crc kubenswrapper[4854]: I1125 10:09:01.056648 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-p594s"] Nov 25 10:09:01 crc kubenswrapper[4854]: I1125 10:09:01.070995 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-p594s"] Nov 25 10:09:03 crc kubenswrapper[4854]: I1125 10:09:03.037336 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f828059-1092-45cd-99a8-3915b6bab37f" path="/var/lib/kubelet/pods/4f828059-1092-45cd-99a8-3915b6bab37f/volumes" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.678663 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wmj76"] Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.682425 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.705832 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wmj76"] Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.797310 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbmwd\" (UniqueName: \"kubernetes.io/projected/57612c0b-691c-44c3-8de6-9683360a505a-kube-api-access-sbmwd\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.797695 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-catalog-content\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.797787 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-utilities\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.900355 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbmwd\" (UniqueName: \"kubernetes.io/projected/57612c0b-691c-44c3-8de6-9683360a505a-kube-api-access-sbmwd\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.900457 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-catalog-content\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.900522 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-utilities\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.901248 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-utilities\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.901304 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-catalog-content\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:11 crc kubenswrapper[4854]: I1125 10:09:11.924583 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sbmwd\" (UniqueName: \"kubernetes.io/projected/57612c0b-691c-44c3-8de6-9683360a505a-kube-api-access-sbmwd\") pod \"redhat-operators-wmj76\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:12 crc kubenswrapper[4854]: I1125 10:09:12.032505 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:12 crc kubenswrapper[4854]: I1125 10:09:12.512397 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wmj76"] Nov 25 10:09:12 crc kubenswrapper[4854]: I1125 10:09:12.693132 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmj76" event={"ID":"57612c0b-691c-44c3-8de6-9683360a505a","Type":"ContainerStarted","Data":"93ef3a968ba86f3415907b45828e4c6de3d3d27dcf29592353e8d677fc688ad9"} Nov 25 10:09:13 crc kubenswrapper[4854]: I1125 10:09:13.706538 4854 generic.go:334] "Generic (PLEG): container finished" podID="57612c0b-691c-44c3-8de6-9683360a505a" containerID="a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b" exitCode=0 Nov 25 10:09:13 crc kubenswrapper[4854]: I1125 10:09:13.706733 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmj76" event={"ID":"57612c0b-691c-44c3-8de6-9683360a505a","Type":"ContainerDied","Data":"a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b"} Nov 25 10:09:15 crc kubenswrapper[4854]: I1125 10:09:15.732211 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmj76" event={"ID":"57612c0b-691c-44c3-8de6-9683360a505a","Type":"ContainerStarted","Data":"4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0"} Nov 25 10:09:19 crc kubenswrapper[4854]: I1125 10:09:19.783740 4854 generic.go:334] "Generic (PLEG): container finished" podID="57612c0b-691c-44c3-8de6-9683360a505a" containerID="4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0" exitCode=0 Nov 25 10:09:19 crc kubenswrapper[4854]: I1125 10:09:19.783800 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmj76" event={"ID":"57612c0b-691c-44c3-8de6-9683360a505a","Type":"ContainerDied","Data":"4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0"} Nov 25 10:09:20 crc kubenswrapper[4854]: I1125 10:09:20.796535 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmj76" event={"ID":"57612c0b-691c-44c3-8de6-9683360a505a","Type":"ContainerStarted","Data":"b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a"} Nov 25 10:09:20 crc kubenswrapper[4854]: I1125 10:09:20.849185 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wmj76" podStartSLOduration=3.319646733 podStartE2EDuration="9.849138398s" podCreationTimestamp="2025-11-25 10:09:11 +0000 UTC" firstStartedPulling="2025-11-25 10:09:13.709918702 +0000 UTC m=+1959.562912078" lastFinishedPulling="2025-11-25 10:09:20.239410357 +0000 UTC m=+1966.092403743" observedRunningTime="2025-11-25 10:09:20.812291976 +0000 UTC m=+1966.665285362" watchObservedRunningTime="2025-11-25 10:09:20.849138398 +0000 UTC m=+1966.702131774" Nov 25 10:09:22 crc kubenswrapper[4854]: I1125 10:09:22.033302 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wmj76" 
Nov 25 10:09:22 crc kubenswrapper[4854]: I1125 10:09:22.033726 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:23 crc kubenswrapper[4854]: I1125 10:09:23.084116 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wmj76" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="registry-server" probeResult="failure" output=< Nov 25 10:09:23 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 10:09:23 crc kubenswrapper[4854]: > Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.451525 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj86"] Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.458441 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.483838 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj86"] Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.550530 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-utilities\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.550735 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-catalog-content\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.550768 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvzh4\" (UniqueName: \"kubernetes.io/projected/d7354e94-ef6d-491d-bc73-ff14126eae41-kube-api-access-xvzh4\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.652486 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-catalog-content\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.652784 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvzh4\" (UniqueName: \"kubernetes.io/projected/d7354e94-ef6d-491d-bc73-ff14126eae41-kube-api-access-xvzh4\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.652948 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-utilities\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " 
pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.653041 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-catalog-content\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.653430 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-utilities\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.678156 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvzh4\" (UniqueName: \"kubernetes.io/projected/d7354e94-ef6d-491d-bc73-ff14126eae41-kube-api-access-xvzh4\") pod \"redhat-marketplace-ptj86\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:29 crc kubenswrapper[4854]: I1125 10:09:29.791842 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:30 crc kubenswrapper[4854]: I1125 10:09:30.328438 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj86"] Nov 25 10:09:30 crc kubenswrapper[4854]: I1125 10:09:30.920641 4854 generic.go:334] "Generic (PLEG): container finished" podID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerID="aa4769415b52fa23f9d521399c635055a691f725fdf5e3022ac46532452d7144" exitCode=0 Nov 25 10:09:30 crc kubenswrapper[4854]: I1125 10:09:30.920925 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj86" event={"ID":"d7354e94-ef6d-491d-bc73-ff14126eae41","Type":"ContainerDied","Data":"aa4769415b52fa23f9d521399c635055a691f725fdf5e3022ac46532452d7144"} Nov 25 10:09:30 crc kubenswrapper[4854]: I1125 10:09:30.921156 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj86" event={"ID":"d7354e94-ef6d-491d-bc73-ff14126eae41","Type":"ContainerStarted","Data":"cdfd67ec883c7f85fb5131cd48225831a420a36613f753785f7023d1dc5dc758"} Nov 25 10:09:31 crc kubenswrapper[4854]: I1125 10:09:31.933963 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj86" event={"ID":"d7354e94-ef6d-491d-bc73-ff14126eae41","Type":"ContainerStarted","Data":"132cd11cfdbee1cc0517dd81b62d2d1eeaf2fdea2ecc46c03c5074962f24c467"} Nov 25 10:09:32 crc kubenswrapper[4854]: I1125 10:09:32.090703 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:32 crc kubenswrapper[4854]: I1125 10:09:32.150002 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:32 crc kubenswrapper[4854]: I1125 10:09:32.959756 4854 generic.go:334] "Generic (PLEG): container finished" podID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerID="132cd11cfdbee1cc0517dd81b62d2d1eeaf2fdea2ecc46c03c5074962f24c467" exitCode=0 Nov 25 10:09:32 crc kubenswrapper[4854]: I1125 10:09:32.959948 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-ptj86" event={"ID":"d7354e94-ef6d-491d-bc73-ff14126eae41","Type":"ContainerDied","Data":"132cd11cfdbee1cc0517dd81b62d2d1eeaf2fdea2ecc46c03c5074962f24c467"} Nov 25 10:09:33 crc kubenswrapper[4854]: I1125 10:09:33.972304 4854 generic.go:334] "Generic (PLEG): container finished" podID="51c1f6ef-dc9b-4a3a-be06-21f8c34594b1" containerID="28502c01eb466951abe2985c23741f8c063d2b52a074a0017dd9a793dcd47c7a" exitCode=0 Nov 25 10:09:33 crc kubenswrapper[4854]: I1125 10:09:33.972381 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" event={"ID":"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1","Type":"ContainerDied","Data":"28502c01eb466951abe2985c23741f8c063d2b52a074a0017dd9a793dcd47c7a"} Nov 25 10:09:33 crc kubenswrapper[4854]: I1125 10:09:33.975420 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj86" event={"ID":"d7354e94-ef6d-491d-bc73-ff14126eae41","Type":"ContainerStarted","Data":"25dd8d6402c7615512f77f7d9841d21858acefcec2d2e02f45e7aae9706300f6"} Nov 25 10:09:34 crc kubenswrapper[4854]: I1125 10:09:34.015150 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ptj86" podStartSLOduration=2.454272857 podStartE2EDuration="5.015123571s" podCreationTimestamp="2025-11-25 10:09:29 +0000 UTC" firstStartedPulling="2025-11-25 10:09:30.924652725 +0000 UTC m=+1976.777646111" lastFinishedPulling="2025-11-25 10:09:33.485503449 +0000 UTC m=+1979.338496825" observedRunningTime="2025-11-25 10:09:34.010631078 +0000 UTC m=+1979.863624474" watchObservedRunningTime="2025-11-25 10:09:34.015123571 +0000 UTC m=+1979.868116957" Nov 25 10:09:34 crc kubenswrapper[4854]: I1125 10:09:34.378023 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wmj76"] Nov 25 10:09:34 crc kubenswrapper[4854]: I1125 10:09:34.378269 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wmj76" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="registry-server" containerID="cri-o://b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a" gracePeriod=2 Nov 25 10:09:34 crc kubenswrapper[4854]: I1125 10:09:34.956791 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.000748 4854 generic.go:334] "Generic (PLEG): container finished" podID="57612c0b-691c-44c3-8de6-9683360a505a" containerID="b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a" exitCode=0 Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.000979 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wmj76" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.001616 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmj76" event={"ID":"57612c0b-691c-44c3-8de6-9683360a505a","Type":"ContainerDied","Data":"b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a"} Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.001645 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmj76" event={"ID":"57612c0b-691c-44c3-8de6-9683360a505a","Type":"ContainerDied","Data":"93ef3a968ba86f3415907b45828e4c6de3d3d27dcf29592353e8d677fc688ad9"} Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.001663 4854 scope.go:117] "RemoveContainer" containerID="b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.060598 4854 scope.go:117] "RemoveContainer" containerID="4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.090457 4854 scope.go:117] "RemoveContainer" containerID="a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.131983 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-catalog-content\") pod \"57612c0b-691c-44c3-8de6-9683360a505a\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.132157 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-utilities\") pod \"57612c0b-691c-44c3-8de6-9683360a505a\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.132332 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbmwd\" (UniqueName: \"kubernetes.io/projected/57612c0b-691c-44c3-8de6-9683360a505a-kube-api-access-sbmwd\") pod \"57612c0b-691c-44c3-8de6-9683360a505a\" (UID: \"57612c0b-691c-44c3-8de6-9683360a505a\") " Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.133244 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-utilities" (OuterVolumeSpecName: "utilities") pod "57612c0b-691c-44c3-8de6-9683360a505a" (UID: "57612c0b-691c-44c3-8de6-9683360a505a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.137865 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57612c0b-691c-44c3-8de6-9683360a505a-kube-api-access-sbmwd" (OuterVolumeSpecName: "kube-api-access-sbmwd") pod "57612c0b-691c-44c3-8de6-9683360a505a" (UID: "57612c0b-691c-44c3-8de6-9683360a505a"). InnerVolumeSpecName "kube-api-access-sbmwd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.223912 4854 scope.go:117] "RemoveContainer" containerID="b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a" Nov 25 10:09:35 crc kubenswrapper[4854]: E1125 10:09:35.224445 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a\": container with ID starting with b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a not found: ID does not exist" containerID="b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.224488 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a"} err="failed to get container status \"b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a\": rpc error: code = NotFound desc = could not find container \"b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a\": container with ID starting with b0dab9b5bf84becaf4c7a745f7e87cec60cf069fef4cff8a43dd294998c9036a not found: ID does not exist" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.224514 4854 scope.go:117] "RemoveContainer" containerID="4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0" Nov 25 10:09:35 crc kubenswrapper[4854]: E1125 10:09:35.224871 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0\": container with ID starting with 4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0 not found: ID does not exist" containerID="4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.224932 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0"} err="failed to get container status \"4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0\": rpc error: code = NotFound desc = could not find container \"4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0\": container with ID starting with 4c39e66f9d67f0e29c27c6dad6d2eddb139464a3c14cfbedf251c0f385b8d9e0 not found: ID does not exist" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.224961 4854 scope.go:117] "RemoveContainer" containerID="a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b" Nov 25 10:09:35 crc kubenswrapper[4854]: E1125 10:09:35.225402 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b\": container with ID starting with a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b not found: ID does not exist" containerID="a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.225445 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b"} err="failed to get container status \"a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b\": rpc error: code = NotFound desc = could not 
find container \"a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b\": container with ID starting with a6389f7732a31be45c53091ed5e9096d0d0683e9aa6a7d8d18f2bafb0127579b not found: ID does not exist" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.236187 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbmwd\" (UniqueName: \"kubernetes.io/projected/57612c0b-691c-44c3-8de6-9683360a505a-kube-api-access-sbmwd\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.236229 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.245386 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57612c0b-691c-44c3-8de6-9683360a505a" (UID: "57612c0b-691c-44c3-8de6-9683360a505a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.338817 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57612c0b-691c-44c3-8de6-9683360a505a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.354913 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wmj76"] Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.367750 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wmj76"] Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.433566 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.542925 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-inventory\") pod \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.543023 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkjnf\" (UniqueName: \"kubernetes.io/projected/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-kube-api-access-lkjnf\") pod \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.543062 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-ssh-key\") pod \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\" (UID: \"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1\") " Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.548128 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-kube-api-access-lkjnf" (OuterVolumeSpecName: "kube-api-access-lkjnf") pod "51c1f6ef-dc9b-4a3a-be06-21f8c34594b1" (UID: "51c1f6ef-dc9b-4a3a-be06-21f8c34594b1"). InnerVolumeSpecName "kube-api-access-lkjnf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.599279 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-inventory" (OuterVolumeSpecName: "inventory") pod "51c1f6ef-dc9b-4a3a-be06-21f8c34594b1" (UID: "51c1f6ef-dc9b-4a3a-be06-21f8c34594b1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.603958 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "51c1f6ef-dc9b-4a3a-be06-21f8c34594b1" (UID: "51c1f6ef-dc9b-4a3a-be06-21f8c34594b1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.646508 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.646545 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkjnf\" (UniqueName: \"kubernetes.io/projected/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-kube-api-access-lkjnf\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:35 crc kubenswrapper[4854]: I1125 10:09:35.646557 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51c1f6ef-dc9b-4a3a-be06-21f8c34594b1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.015867 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" event={"ID":"51c1f6ef-dc9b-4a3a-be06-21f8c34594b1","Type":"ContainerDied","Data":"c647ebb2f91a4dcb262a627613f1cc4556f63478672236d98d27cdea768900f2"} Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.015925 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c647ebb2f91a4dcb262a627613f1cc4556f63478672236d98d27cdea768900f2" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.016015 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.117569 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz"] Nov 25 10:09:36 crc kubenswrapper[4854]: E1125 10:09:36.118144 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="extract-utilities" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.118171 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="extract-utilities" Nov 25 10:09:36 crc kubenswrapper[4854]: E1125 10:09:36.118196 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="extract-content" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.118205 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="extract-content" Nov 25 10:09:36 crc kubenswrapper[4854]: E1125 10:09:36.118219 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51c1f6ef-dc9b-4a3a-be06-21f8c34594b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.118228 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="51c1f6ef-dc9b-4a3a-be06-21f8c34594b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 10:09:36 crc kubenswrapper[4854]: E1125 10:09:36.118281 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="registry-server" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.118290 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="registry-server" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.118554 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="51c1f6ef-dc9b-4a3a-be06-21f8c34594b1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.118596 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="57612c0b-691c-44c3-8de6-9683360a505a" containerName="registry-server" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.119540 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.122604 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.122916 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.123383 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.123716 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.129389 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz"] Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.164227 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.164566 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.164903 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-689w9\" (UniqueName: \"kubernetes.io/projected/73c864f0-2662-48ed-9d9c-f04c714af11f-kube-api-access-689w9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.265722 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.265782 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.265846 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-689w9\" (UniqueName: \"kubernetes.io/projected/73c864f0-2662-48ed-9d9c-f04c714af11f-kube-api-access-689w9\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.273539 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.280336 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.284058 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-689w9\" (UniqueName: \"kubernetes.io/projected/73c864f0-2662-48ed-9d9c-f04c714af11f-kube-api-access-689w9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:36 crc kubenswrapper[4854]: I1125 10:09:36.480070 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:09:37 crc kubenswrapper[4854]: I1125 10:09:37.032146 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57612c0b-691c-44c3-8de6-9683360a505a" path="/var/lib/kubelet/pods/57612c0b-691c-44c3-8de6-9683360a505a/volumes" Nov 25 10:09:37 crc kubenswrapper[4854]: W1125 10:09:37.079055 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73c864f0_2662_48ed_9d9c_f04c714af11f.slice/crio-ca42da2b37de3dfa857a4b6ca819668f8f62f4eebfb474c24e6c64d43f0a17a3 WatchSource:0}: Error finding container ca42da2b37de3dfa857a4b6ca819668f8f62f4eebfb474c24e6c64d43f0a17a3: Status 404 returned error can't find the container with id ca42da2b37de3dfa857a4b6ca819668f8f62f4eebfb474c24e6c64d43f0a17a3 Nov 25 10:09:37 crc kubenswrapper[4854]: I1125 10:09:37.088795 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz"] Nov 25 10:09:38 crc kubenswrapper[4854]: I1125 10:09:38.052829 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" event={"ID":"73c864f0-2662-48ed-9d9c-f04c714af11f","Type":"ContainerStarted","Data":"d4b0a7f31f10c380cbd257947eade031e0c3c9944f4d8ed7831836379bc2a897"} Nov 25 10:09:38 crc kubenswrapper[4854]: I1125 10:09:38.053362 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" event={"ID":"73c864f0-2662-48ed-9d9c-f04c714af11f","Type":"ContainerStarted","Data":"ca42da2b37de3dfa857a4b6ca819668f8f62f4eebfb474c24e6c64d43f0a17a3"} Nov 25 10:09:38 crc kubenswrapper[4854]: I1125 10:09:38.070501 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" podStartSLOduration=1.583775537 podStartE2EDuration="2.07047625s" podCreationTimestamp="2025-11-25 10:09:36 +0000 UTC" firstStartedPulling="2025-11-25 10:09:37.081168257 +0000 UTC m=+1982.934161633" lastFinishedPulling="2025-11-25 10:09:37.56786897 +0000 UTC m=+1983.420862346" observedRunningTime="2025-11-25 10:09:38.069336769 +0000 UTC m=+1983.922330145" watchObservedRunningTime="2025-11-25 10:09:38.07047625 +0000 UTC m=+1983.923469646" Nov 25 10:09:38 crc kubenswrapper[4854]: I1125 10:09:38.985368 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-p9p7p"] Nov 25 10:09:38 crc kubenswrapper[4854]: I1125 10:09:38.988652 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.006427 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p9p7p"] Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.049121 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8f8sk\" (UniqueName: \"kubernetes.io/projected/f9efdc34-f165-45ee-ad53-0d8907f79735-kube-api-access-8f8sk\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.049186 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-catalog-content\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.049425 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-utilities\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.151015 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8f8sk\" (UniqueName: \"kubernetes.io/projected/f9efdc34-f165-45ee-ad53-0d8907f79735-kube-api-access-8f8sk\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.151294 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-catalog-content\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.151509 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-utilities\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 
10:09:39.151793 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-catalog-content\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.151873 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-utilities\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.180884 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8f8sk\" (UniqueName: \"kubernetes.io/projected/f9efdc34-f165-45ee-ad53-0d8907f79735-kube-api-access-8f8sk\") pod \"community-operators-p9p7p\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.308920 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.792387 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.793004 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.815389 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p9p7p"] Nov 25 10:09:39 crc kubenswrapper[4854]: I1125 10:09:39.851139 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:40 crc kubenswrapper[4854]: I1125 10:09:40.081863 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p9p7p" event={"ID":"f9efdc34-f165-45ee-ad53-0d8907f79735","Type":"ContainerStarted","Data":"85bc30f5bd16be7c222da55236b77cebc76ee5aba600349299ceabd3968b6b4d"} Nov 25 10:09:40 crc kubenswrapper[4854]: I1125 10:09:40.180046 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:41 crc kubenswrapper[4854]: I1125 10:09:41.094895 4854 generic.go:334] "Generic (PLEG): container finished" podID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerID="e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e" exitCode=0 Nov 25 10:09:41 crc kubenswrapper[4854]: I1125 10:09:41.094952 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p9p7p" event={"ID":"f9efdc34-f165-45ee-ad53-0d8907f79735","Type":"ContainerDied","Data":"e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e"} Nov 25 10:09:42 crc kubenswrapper[4854]: I1125 10:09:42.108154 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p9p7p" event={"ID":"f9efdc34-f165-45ee-ad53-0d8907f79735","Type":"ContainerStarted","Data":"5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a"} Nov 25 10:09:42 crc kubenswrapper[4854]: I1125 10:09:42.177001 4854 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj86"] Nov 25 10:09:43 crc kubenswrapper[4854]: I1125 10:09:43.133127 4854 generic.go:334] "Generic (PLEG): container finished" podID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerID="5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a" exitCode=0 Nov 25 10:09:43 crc kubenswrapper[4854]: I1125 10:09:43.134034 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ptj86" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="registry-server" containerID="cri-o://25dd8d6402c7615512f77f7d9841d21858acefcec2d2e02f45e7aae9706300f6" gracePeriod=2 Nov 25 10:09:43 crc kubenswrapper[4854]: I1125 10:09:43.134160 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p9p7p" event={"ID":"f9efdc34-f165-45ee-ad53-0d8907f79735","Type":"ContainerDied","Data":"5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a"} Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.148902 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p9p7p" event={"ID":"f9efdc34-f165-45ee-ad53-0d8907f79735","Type":"ContainerStarted","Data":"5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116"} Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.151945 4854 generic.go:334] "Generic (PLEG): container finished" podID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerID="25dd8d6402c7615512f77f7d9841d21858acefcec2d2e02f45e7aae9706300f6" exitCode=0 Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.151971 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj86" event={"ID":"d7354e94-ef6d-491d-bc73-ff14126eae41","Type":"ContainerDied","Data":"25dd8d6402c7615512f77f7d9841d21858acefcec2d2e02f45e7aae9706300f6"} Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.167331 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-p9p7p" podStartSLOduration=3.376980378 podStartE2EDuration="6.167312823s" podCreationTimestamp="2025-11-25 10:09:38 +0000 UTC" firstStartedPulling="2025-11-25 10:09:41.096826846 +0000 UTC m=+1986.949820222" lastFinishedPulling="2025-11-25 10:09:43.887159291 +0000 UTC m=+1989.740152667" observedRunningTime="2025-11-25 10:09:44.166623204 +0000 UTC m=+1990.019616590" watchObservedRunningTime="2025-11-25 10:09:44.167312823 +0000 UTC m=+1990.020306209" Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.805459 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.921410 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-catalog-content\") pod \"d7354e94-ef6d-491d-bc73-ff14126eae41\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.921927 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-utilities\") pod \"d7354e94-ef6d-491d-bc73-ff14126eae41\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.922122 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvzh4\" (UniqueName: \"kubernetes.io/projected/d7354e94-ef6d-491d-bc73-ff14126eae41-kube-api-access-xvzh4\") pod \"d7354e94-ef6d-491d-bc73-ff14126eae41\" (UID: \"d7354e94-ef6d-491d-bc73-ff14126eae41\") " Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.922684 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-utilities" (OuterVolumeSpecName: "utilities") pod "d7354e94-ef6d-491d-bc73-ff14126eae41" (UID: "d7354e94-ef6d-491d-bc73-ff14126eae41"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.923093 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.927799 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7354e94-ef6d-491d-bc73-ff14126eae41-kube-api-access-xvzh4" (OuterVolumeSpecName: "kube-api-access-xvzh4") pod "d7354e94-ef6d-491d-bc73-ff14126eae41" (UID: "d7354e94-ef6d-491d-bc73-ff14126eae41"). InnerVolumeSpecName "kube-api-access-xvzh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:44 crc kubenswrapper[4854]: I1125 10:09:44.942405 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d7354e94-ef6d-491d-bc73-ff14126eae41" (UID: "d7354e94-ef6d-491d-bc73-ff14126eae41"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.024819 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7354e94-ef6d-491d-bc73-ff14126eae41-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.025354 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvzh4\" (UniqueName: \"kubernetes.io/projected/d7354e94-ef6d-491d-bc73-ff14126eae41-kube-api-access-xvzh4\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.168022 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptj86" Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.168063 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptj86" event={"ID":"d7354e94-ef6d-491d-bc73-ff14126eae41","Type":"ContainerDied","Data":"cdfd67ec883c7f85fb5131cd48225831a420a36613f753785f7023d1dc5dc758"} Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.168093 4854 scope.go:117] "RemoveContainer" containerID="25dd8d6402c7615512f77f7d9841d21858acefcec2d2e02f45e7aae9706300f6" Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.215035 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj86"] Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.218442 4854 scope.go:117] "RemoveContainer" containerID="132cd11cfdbee1cc0517dd81b62d2d1eeaf2fdea2ecc46c03c5074962f24c467" Nov 25 10:09:45 crc kubenswrapper[4854]: E1125 10:09:45.233339 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7354e94_ef6d_491d_bc73_ff14126eae41.slice/crio-cdfd67ec883c7f85fb5131cd48225831a420a36613f753785f7023d1dc5dc758\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7354e94_ef6d_491d_bc73_ff14126eae41.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.245250 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptj86"] Nov 25 10:09:45 crc kubenswrapper[4854]: I1125 10:09:45.286760 4854 scope.go:117] "RemoveContainer" containerID="aa4769415b52fa23f9d521399c635055a691f725fdf5e3022ac46532452d7144" Nov 25 10:09:47 crc kubenswrapper[4854]: I1125 10:09:47.029486 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" path="/var/lib/kubelet/pods/d7354e94-ef6d-491d-bc73-ff14126eae41/volumes" Nov 25 10:09:49 crc kubenswrapper[4854]: I1125 10:09:49.309606 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:49 crc kubenswrapper[4854]: I1125 10:09:49.310042 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:49 crc kubenswrapper[4854]: I1125 10:09:49.379430 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:50 crc kubenswrapper[4854]: I1125 10:09:50.344964 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:50 crc kubenswrapper[4854]: I1125 10:09:50.398704 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p9p7p"] Nov 25 10:09:52 crc kubenswrapper[4854]: I1125 10:09:52.301260 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-p9p7p" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="registry-server" containerID="cri-o://5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116" gracePeriod=2 Nov 25 10:09:52 crc kubenswrapper[4854]: I1125 10:09:52.858982 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:52 crc kubenswrapper[4854]: I1125 10:09:52.965184 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8f8sk\" (UniqueName: \"kubernetes.io/projected/f9efdc34-f165-45ee-ad53-0d8907f79735-kube-api-access-8f8sk\") pod \"f9efdc34-f165-45ee-ad53-0d8907f79735\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " Nov 25 10:09:52 crc kubenswrapper[4854]: I1125 10:09:52.965593 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-catalog-content\") pod \"f9efdc34-f165-45ee-ad53-0d8907f79735\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " Nov 25 10:09:52 crc kubenswrapper[4854]: I1125 10:09:52.965924 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-utilities\") pod \"f9efdc34-f165-45ee-ad53-0d8907f79735\" (UID: \"f9efdc34-f165-45ee-ad53-0d8907f79735\") " Nov 25 10:09:52 crc kubenswrapper[4854]: I1125 10:09:52.966855 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-utilities" (OuterVolumeSpecName: "utilities") pod "f9efdc34-f165-45ee-ad53-0d8907f79735" (UID: "f9efdc34-f165-45ee-ad53-0d8907f79735"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:52 crc kubenswrapper[4854]: I1125 10:09:52.972994 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9efdc34-f165-45ee-ad53-0d8907f79735-kube-api-access-8f8sk" (OuterVolumeSpecName: "kube-api-access-8f8sk") pod "f9efdc34-f165-45ee-ad53-0d8907f79735" (UID: "f9efdc34-f165-45ee-ad53-0d8907f79735"). InnerVolumeSpecName "kube-api-access-8f8sk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.024111 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9efdc34-f165-45ee-ad53-0d8907f79735" (UID: "f9efdc34-f165-45ee-ad53-0d8907f79735"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.068961 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.068996 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8f8sk\" (UniqueName: \"kubernetes.io/projected/f9efdc34-f165-45ee-ad53-0d8907f79735-kube-api-access-8f8sk\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.069036 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9efdc34-f165-45ee-ad53-0d8907f79735-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.317471 4854 generic.go:334] "Generic (PLEG): container finished" podID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerID="5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116" exitCode=0 Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.317540 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p9p7p" event={"ID":"f9efdc34-f165-45ee-ad53-0d8907f79735","Type":"ContainerDied","Data":"5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116"} Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.317583 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p9p7p" event={"ID":"f9efdc34-f165-45ee-ad53-0d8907f79735","Type":"ContainerDied","Data":"85bc30f5bd16be7c222da55236b77cebc76ee5aba600349299ceabd3968b6b4d"} Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.317614 4854 scope.go:117] "RemoveContainer" containerID="5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.317898 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p9p7p" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.372418 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p9p7p"] Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.391012 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-p9p7p"] Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.394358 4854 scope.go:117] "RemoveContainer" containerID="5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.430930 4854 scope.go:117] "RemoveContainer" containerID="e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.500558 4854 scope.go:117] "RemoveContainer" containerID="5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116" Nov 25 10:09:53 crc kubenswrapper[4854]: E1125 10:09:53.501212 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116\": container with ID starting with 5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116 not found: ID does not exist" containerID="5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.501268 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116"} err="failed to get container status \"5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116\": rpc error: code = NotFound desc = could not find container \"5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116\": container with ID starting with 5bd2117814788b2071a989bedab7f74e6557bce6213d6ded05c5fa525b184116 not found: ID does not exist" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.501316 4854 scope.go:117] "RemoveContainer" containerID="5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a" Nov 25 10:09:53 crc kubenswrapper[4854]: E1125 10:09:53.502038 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a\": container with ID starting with 5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a not found: ID does not exist" containerID="5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.502086 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a"} err="failed to get container status \"5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a\": rpc error: code = NotFound desc = could not find container \"5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a\": container with ID starting with 5065aa40c436f213f2cb4fb65a9a835c34b236b51c74b6bddb34f510212dc26a not found: ID does not exist" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.502120 4854 scope.go:117] "RemoveContainer" containerID="e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e" Nov 25 10:09:53 crc kubenswrapper[4854]: E1125 10:09:53.506015 4854 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e\": container with ID starting with e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e not found: ID does not exist" containerID="e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e" Nov 25 10:09:53 crc kubenswrapper[4854]: I1125 10:09:53.506070 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e"} err="failed to get container status \"e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e\": rpc error: code = NotFound desc = could not find container \"e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e\": container with ID starting with e30b3e643e155c6e9a65d56474c8abeb48280549ff485273636cd0f128c6dd2e not found: ID does not exist" Nov 25 10:09:54 crc kubenswrapper[4854]: I1125 10:09:54.678235 4854 scope.go:117] "RemoveContainer" containerID="ed75b623deadff67db66cc75a930a45b815889596ca105d7959096e1f197f93d" Nov 25 10:09:54 crc kubenswrapper[4854]: I1125 10:09:54.716511 4854 scope.go:117] "RemoveContainer" containerID="4c74dced21ec183c10db63707e93a19cdc6f123ef3543bba8f2236f96fd253c2" Nov 25 10:09:55 crc kubenswrapper[4854]: I1125 10:09:55.024585 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" path="/var/lib/kubelet/pods/f9efdc34-f165-45ee-ad53-0d8907f79735/volumes" Nov 25 10:09:55 crc kubenswrapper[4854]: I1125 10:09:55.052091 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-dbac-account-create-kk5zc"] Nov 25 10:09:55 crc kubenswrapper[4854]: I1125 10:09:55.063928 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-dbac-account-create-kk5zc"] Nov 25 10:09:56 crc kubenswrapper[4854]: I1125 10:09:56.045432 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-swlpj"] Nov 25 10:09:56 crc kubenswrapper[4854]: I1125 10:09:56.057272 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-6hbdw"] Nov 25 10:09:56 crc kubenswrapper[4854]: I1125 10:09:56.067757 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-6hbdw"] Nov 25 10:09:56 crc kubenswrapper[4854]: I1125 10:09:56.078958 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-swlpj"] Nov 25 10:09:57 crc kubenswrapper[4854]: I1125 10:09:57.026509 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37479b5f-7d9e-4202-8807-6442cf079a33" path="/var/lib/kubelet/pods/37479b5f-7d9e-4202-8807-6442cf079a33/volumes" Nov 25 10:09:57 crc kubenswrapper[4854]: I1125 10:09:57.027481 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="affbd45c-173c-492f-b047-3a5db0988607" path="/var/lib/kubelet/pods/affbd45c-173c-492f-b047-3a5db0988607/volumes" Nov 25 10:09:57 crc kubenswrapper[4854]: I1125 10:09:57.028890 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6d04f91-08d5-484d-98a1-a1b1fc315df4" path="/var/lib/kubelet/pods/c6d04f91-08d5-484d-98a1-a1b1fc315df4/volumes" Nov 25 10:09:58 crc kubenswrapper[4854]: I1125 10:09:58.054134 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-8rxxf"] Nov 25 10:09:58 crc kubenswrapper[4854]: I1125 10:09:58.069430 4854 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/nova-cell0-daab-account-create-5pd4d"] Nov 25 10:09:58 crc kubenswrapper[4854]: I1125 10:09:58.079397 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-7fef-account-create-np5sq"] Nov 25 10:09:58 crc kubenswrapper[4854]: I1125 10:09:58.088076 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-8rxxf"] Nov 25 10:09:58 crc kubenswrapper[4854]: I1125 10:09:58.097771 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-daab-account-create-5pd4d"] Nov 25 10:09:58 crc kubenswrapper[4854]: I1125 10:09:58.108951 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-7fef-account-create-np5sq"] Nov 25 10:09:59 crc kubenswrapper[4854]: I1125 10:09:59.025788 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01b1d826-91f3-4136-b30a-f48b2e6934a9" path="/var/lib/kubelet/pods/01b1d826-91f3-4136-b30a-f48b2e6934a9/volumes" Nov 25 10:09:59 crc kubenswrapper[4854]: I1125 10:09:59.026657 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2822fcff-eadb-4d68-9297-2940d4573bc7" path="/var/lib/kubelet/pods/2822fcff-eadb-4d68-9297-2940d4573bc7/volumes" Nov 25 10:09:59 crc kubenswrapper[4854]: I1125 10:09:59.028071 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9164e45-ade8-4f78-b89e-e6e3b61e1a4e" path="/var/lib/kubelet/pods/d9164e45-ade8-4f78-b89e-e6e3b61e1a4e/volumes" Nov 25 10:10:25 crc kubenswrapper[4854]: I1125 10:10:25.029148 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:10:25 crc kubenswrapper[4854]: I1125 10:10:25.029917 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:10:28 crc kubenswrapper[4854]: I1125 10:10:28.071751 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dr9x4"] Nov 25 10:10:28 crc kubenswrapper[4854]: I1125 10:10:28.086591 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dr9x4"] Nov 25 10:10:29 crc kubenswrapper[4854]: I1125 10:10:29.027670 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a05bbad4-447a-452c-8254-d2f0a2ea9fdb" path="/var/lib/kubelet/pods/a05bbad4-447a-452c-8254-d2f0a2ea9fdb/volumes" Nov 25 10:10:35 crc kubenswrapper[4854]: I1125 10:10:35.045160 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-tg6kv"] Nov 25 10:10:35 crc kubenswrapper[4854]: I1125 10:10:35.060663 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-tg6kv"] Nov 25 10:10:36 crc kubenswrapper[4854]: I1125 10:10:36.037755 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-96f1-account-create-2dr75"] Nov 25 10:10:36 crc kubenswrapper[4854]: I1125 10:10:36.048570 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-96f1-account-create-2dr75"] Nov 25 10:10:37 crc kubenswrapper[4854]: I1125 10:10:37.027650 4854 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94d00e2e-b111-4b29-8ba5-aca5ed86028a" path="/var/lib/kubelet/pods/94d00e2e-b111-4b29-8ba5-aca5ed86028a/volumes" Nov 25 10:10:37 crc kubenswrapper[4854]: I1125 10:10:37.031155 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a16432da-436b-4d4c-b383-e009ff4a4ff6" path="/var/lib/kubelet/pods/a16432da-436b-4d4c-b383-e009ff4a4ff6/volumes" Nov 25 10:10:54 crc kubenswrapper[4854]: I1125 10:10:54.051094 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-wg4tc"] Nov 25 10:10:54 crc kubenswrapper[4854]: I1125 10:10:54.072600 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-wg4tc"] Nov 25 10:10:54 crc kubenswrapper[4854]: I1125 10:10:54.913638 4854 scope.go:117] "RemoveContainer" containerID="9b30c9076b3fa761b2b41c56f8333f28124f0b8c191b8351cce45674e79ca7e5" Nov 25 10:10:54 crc kubenswrapper[4854]: I1125 10:10:54.948450 4854 scope.go:117] "RemoveContainer" containerID="dbb617cf9bad852c619e8a70e79b8d94465b864ab151f06fa54fade7929f52f1" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.027194 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3722d7b-30f5-414d-b947-9be5a8494449" path="/var/lib/kubelet/pods/b3722d7b-30f5-414d-b947-9be5a8494449/volumes" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.028881 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.028947 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.053929 4854 scope.go:117] "RemoveContainer" containerID="dfea7521c7f1ee5364874d541960caedb2f5b5922d180a9f6a4d4a060628aa2e" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.083161 4854 scope.go:117] "RemoveContainer" containerID="3db0c6c8660a37316f2bae6598892fc702d2d88bfb66027906dc88fd44506139" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.140085 4854 scope.go:117] "RemoveContainer" containerID="4d00bcdabf49f587df89080c98c8ffeab9b987293a55257c3ad6d8df072b3768" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.199624 4854 scope.go:117] "RemoveContainer" containerID="b0d047cca42fd9d3599491104df3f727dc0566c60422fcde475d13bdfb653ff4" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.272967 4854 scope.go:117] "RemoveContainer" containerID="f5c47cb81bfbf5630be07e57e0b2d0ef8a3b346a79db8bd438b45183acbe3bde" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.299313 4854 scope.go:117] "RemoveContainer" containerID="28baefc9c156e4f2be20de528e25a47ed40d871854d383c7d33dcf83a162b73c" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.326391 4854 scope.go:117] "RemoveContainer" containerID="99c0778f593da4ff167bce1ee9650bf267fd5b2bf236db328efee2554568c31f" Nov 25 10:10:55 crc kubenswrapper[4854]: I1125 10:10:55.359354 4854 scope.go:117] "RemoveContainer" containerID="1399f465cada421ee03e69385fe6686d9b35d4d927864d5a9f945b6b3e1d2a93" Nov 
25 10:11:00 crc kubenswrapper[4854]: I1125 10:11:00.118539 4854 generic.go:334] "Generic (PLEG): container finished" podID="73c864f0-2662-48ed-9d9c-f04c714af11f" containerID="d4b0a7f31f10c380cbd257947eade031e0c3c9944f4d8ed7831836379bc2a897" exitCode=0 Nov 25 10:11:00 crc kubenswrapper[4854]: I1125 10:11:00.118666 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" event={"ID":"73c864f0-2662-48ed-9d9c-f04c714af11f","Type":"ContainerDied","Data":"d4b0a7f31f10c380cbd257947eade031e0c3c9944f4d8ed7831836379bc2a897"} Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.068827 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6k8ck"] Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.086391 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6k8ck"] Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.594502 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.684611 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-inventory\") pod \"73c864f0-2662-48ed-9d9c-f04c714af11f\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.684732 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-ssh-key\") pod \"73c864f0-2662-48ed-9d9c-f04c714af11f\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.684839 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-689w9\" (UniqueName: \"kubernetes.io/projected/73c864f0-2662-48ed-9d9c-f04c714af11f-kube-api-access-689w9\") pod \"73c864f0-2662-48ed-9d9c-f04c714af11f\" (UID: \"73c864f0-2662-48ed-9d9c-f04c714af11f\") " Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.693934 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73c864f0-2662-48ed-9d9c-f04c714af11f-kube-api-access-689w9" (OuterVolumeSpecName: "kube-api-access-689w9") pod "73c864f0-2662-48ed-9d9c-f04c714af11f" (UID: "73c864f0-2662-48ed-9d9c-f04c714af11f"). InnerVolumeSpecName "kube-api-access-689w9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.718496 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "73c864f0-2662-48ed-9d9c-f04c714af11f" (UID: "73c864f0-2662-48ed-9d9c-f04c714af11f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.719970 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-inventory" (OuterVolumeSpecName: "inventory") pod "73c864f0-2662-48ed-9d9c-f04c714af11f" (UID: "73c864f0-2662-48ed-9d9c-f04c714af11f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.787590 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.787622 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/73c864f0-2662-48ed-9d9c-f04c714af11f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:01 crc kubenswrapper[4854]: I1125 10:11:01.787634 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-689w9\" (UniqueName: \"kubernetes.io/projected/73c864f0-2662-48ed-9d9c-f04c714af11f-kube-api-access-689w9\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.144652 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" event={"ID":"73c864f0-2662-48ed-9d9c-f04c714af11f","Type":"ContainerDied","Data":"ca42da2b37de3dfa857a4b6ca819668f8f62f4eebfb474c24e6c64d43f0a17a3"} Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.144818 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca42da2b37de3dfa857a4b6ca819668f8f62f4eebfb474c24e6c64d43f0a17a3" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.144756 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.242847 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk"] Nov 25 10:11:02 crc kubenswrapper[4854]: E1125 10:11:02.243557 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="extract-utilities" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.243582 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="extract-utilities" Nov 25 10:11:02 crc kubenswrapper[4854]: E1125 10:11:02.243601 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="extract-content" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.243613 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="extract-content" Nov 25 10:11:02 crc kubenswrapper[4854]: E1125 10:11:02.243637 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="extract-content" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.243648 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="extract-content" Nov 25 10:11:02 crc kubenswrapper[4854]: E1125 10:11:02.243692 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="registry-server" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.243706 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="registry-server" Nov 25 10:11:02 crc kubenswrapper[4854]: E1125 10:11:02.243728 4854 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="registry-server" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.243741 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="registry-server" Nov 25 10:11:02 crc kubenswrapper[4854]: E1125 10:11:02.243756 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73c864f0-2662-48ed-9d9c-f04c714af11f" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.243766 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="73c864f0-2662-48ed-9d9c-f04c714af11f" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:02 crc kubenswrapper[4854]: E1125 10:11:02.243816 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="extract-utilities" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.243825 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="extract-utilities" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.244111 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9efdc34-f165-45ee-ad53-0d8907f79735" containerName="registry-server" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.244155 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="73c864f0-2662-48ed-9d9c-f04c714af11f" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.244175 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7354e94-ef6d-491d-bc73-ff14126eae41" containerName="registry-server" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.245259 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.248135 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.248290 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.248446 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.248459 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.257717 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk"] Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.299264 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.299350 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49kd2\" (UniqueName: \"kubernetes.io/projected/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-kube-api-access-49kd2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.299563 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.402102 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.402252 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49kd2\" (UniqueName: \"kubernetes.io/projected/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-kube-api-access-49kd2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.402494 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.407868 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.408595 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.428289 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49kd2\" (UniqueName: \"kubernetes.io/projected/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-kube-api-access-49kd2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:02 crc kubenswrapper[4854]: I1125 10:11:02.570333 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:03 crc kubenswrapper[4854]: I1125 10:11:03.028016 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b769df1-897a-4560-a18d-cbb642930a72" path="/var/lib/kubelet/pods/5b769df1-897a-4560-a18d-cbb642930a72/volumes" Nov 25 10:11:03 crc kubenswrapper[4854]: I1125 10:11:03.132956 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk"] Nov 25 10:11:03 crc kubenswrapper[4854]: I1125 10:11:03.171442 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" event={"ID":"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b","Type":"ContainerStarted","Data":"2a01ba54c35d8f4157eb42dbfa93ebe13bc7f05160f4eb9bfa94a9b8d1385e5f"} Nov 25 10:11:04 crc kubenswrapper[4854]: I1125 10:11:04.184374 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" event={"ID":"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b","Type":"ContainerStarted","Data":"53a33085415eb5a5bfd9be732551b6d1daf0e79ab45326cfbb5a9f4f49095bda"} Nov 25 10:11:04 crc kubenswrapper[4854]: I1125 10:11:04.215653 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" podStartSLOduration=1.558707284 podStartE2EDuration="2.215627572s" podCreationTimestamp="2025-11-25 10:11:02 +0000 UTC" firstStartedPulling="2025-11-25 10:11:03.132600584 +0000 UTC m=+2068.985593960" lastFinishedPulling="2025-11-25 10:11:03.789520872 +0000 UTC m=+2069.642514248" observedRunningTime="2025-11-25 10:11:04.209017501 +0000 UTC m=+2070.062010887" watchObservedRunningTime="2025-11-25 10:11:04.215627572 +0000 UTC m=+2070.068620948" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 
10:11:10.104000 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ch9sx"] Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.107980 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.132400 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ch9sx"] Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.235356 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-utilities\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.235396 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b74tq\" (UniqueName: \"kubernetes.io/projected/822560d6-dc24-415e-acff-c46ccb3f75c6-kube-api-access-b74tq\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.235635 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-catalog-content\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.338513 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-catalog-content\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.338629 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-utilities\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.338654 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b74tq\" (UniqueName: \"kubernetes.io/projected/822560d6-dc24-415e-acff-c46ccb3f75c6-kube-api-access-b74tq\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.339616 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-utilities\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.339966 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-catalog-content\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.363609 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b74tq\" (UniqueName: \"kubernetes.io/projected/822560d6-dc24-415e-acff-c46ccb3f75c6-kube-api-access-b74tq\") pod \"certified-operators-ch9sx\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.439257 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:10 crc kubenswrapper[4854]: I1125 10:11:10.992922 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ch9sx"] Nov 25 10:11:11 crc kubenswrapper[4854]: I1125 10:11:11.292287 4854 generic.go:334] "Generic (PLEG): container finished" podID="0ae9a7d0-e882-4647-94e3-3c41a2b18d1b" containerID="53a33085415eb5a5bfd9be732551b6d1daf0e79ab45326cfbb5a9f4f49095bda" exitCode=0 Nov 25 10:11:11 crc kubenswrapper[4854]: I1125 10:11:11.292403 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" event={"ID":"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b","Type":"ContainerDied","Data":"53a33085415eb5a5bfd9be732551b6d1daf0e79ab45326cfbb5a9f4f49095bda"} Nov 25 10:11:11 crc kubenswrapper[4854]: I1125 10:11:11.298154 4854 generic.go:334] "Generic (PLEG): container finished" podID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerID="89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237" exitCode=0 Nov 25 10:11:11 crc kubenswrapper[4854]: I1125 10:11:11.298209 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ch9sx" event={"ID":"822560d6-dc24-415e-acff-c46ccb3f75c6","Type":"ContainerDied","Data":"89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237"} Nov 25 10:11:11 crc kubenswrapper[4854]: I1125 10:11:11.298238 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ch9sx" event={"ID":"822560d6-dc24-415e-acff-c46ccb3f75c6","Type":"ContainerStarted","Data":"11e2fa7ba2cd6d42e3cc792aba7e6baee0267691be4a9ad584118f1e8c80c3d8"} Nov 25 10:11:12 crc kubenswrapper[4854]: I1125 10:11:12.816867 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:12 crc kubenswrapper[4854]: I1125 10:11:12.908101 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-ssh-key\") pod \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " Nov 25 10:11:12 crc kubenswrapper[4854]: I1125 10:11:12.908379 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-inventory\") pod \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " Nov 25 10:11:12 crc kubenswrapper[4854]: I1125 10:11:12.908609 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49kd2\" (UniqueName: \"kubernetes.io/projected/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-kube-api-access-49kd2\") pod \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\" (UID: \"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b\") " Nov 25 10:11:12 crc kubenswrapper[4854]: I1125 10:11:12.914275 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-kube-api-access-49kd2" (OuterVolumeSpecName: "kube-api-access-49kd2") pod "0ae9a7d0-e882-4647-94e3-3c41a2b18d1b" (UID: "0ae9a7d0-e882-4647-94e3-3c41a2b18d1b"). InnerVolumeSpecName "kube-api-access-49kd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:12 crc kubenswrapper[4854]: I1125 10:11:12.953538 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-inventory" (OuterVolumeSpecName: "inventory") pod "0ae9a7d0-e882-4647-94e3-3c41a2b18d1b" (UID: "0ae9a7d0-e882-4647-94e3-3c41a2b18d1b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:12 crc kubenswrapper[4854]: I1125 10:11:12.954049 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0ae9a7d0-e882-4647-94e3-3c41a2b18d1b" (UID: "0ae9a7d0-e882-4647-94e3-3c41a2b18d1b"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.014268 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.014574 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49kd2\" (UniqueName: \"kubernetes.io/projected/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-kube-api-access-49kd2\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.014605 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ae9a7d0-e882-4647-94e3-3c41a2b18d1b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.351928 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" event={"ID":"0ae9a7d0-e882-4647-94e3-3c41a2b18d1b","Type":"ContainerDied","Data":"2a01ba54c35d8f4157eb42dbfa93ebe13bc7f05160f4eb9bfa94a9b8d1385e5f"} Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.351974 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a01ba54c35d8f4157eb42dbfa93ebe13bc7f05160f4eb9bfa94a9b8d1385e5f" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.352040 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.364196 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ch9sx" event={"ID":"822560d6-dc24-415e-acff-c46ccb3f75c6","Type":"ContainerStarted","Data":"c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96"} Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.443379 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn"] Nov 25 10:11:13 crc kubenswrapper[4854]: E1125 10:11:13.444128 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ae9a7d0-e882-4647-94e3-3c41a2b18d1b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.444146 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ae9a7d0-e882-4647-94e3-3c41a2b18d1b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.444402 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ae9a7d0-e882-4647-94e3-3c41a2b18d1b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.445176 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.448101 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.448242 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.448347 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.448521 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.472377 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn"] Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.638124 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.638192 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rpzh\" (UniqueName: \"kubernetes.io/projected/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-kube-api-access-6rpzh\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.638443 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.741496 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.741762 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.741810 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rpzh\" (UniqueName: \"kubernetes.io/projected/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-kube-api-access-6rpzh\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: 
\"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.746976 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.748287 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.758082 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rpzh\" (UniqueName: \"kubernetes.io/projected/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-kube-api-access-6rpzh\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-499vn\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:13 crc kubenswrapper[4854]: I1125 10:11:13.764209 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:14 crc kubenswrapper[4854]: I1125 10:11:14.296612 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn"] Nov 25 10:11:14 crc kubenswrapper[4854]: W1125 10:11:14.299722 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e84a5bb_da4d_40aa_a555_a63bd8bd3f10.slice/crio-2674cc02798a69fe517ad17165dabea15895078fe3696ff89dd0a50276ae1289 WatchSource:0}: Error finding container 2674cc02798a69fe517ad17165dabea15895078fe3696ff89dd0a50276ae1289: Status 404 returned error can't find the container with id 2674cc02798a69fe517ad17165dabea15895078fe3696ff89dd0a50276ae1289 Nov 25 10:11:14 crc kubenswrapper[4854]: I1125 10:11:14.378362 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" event={"ID":"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10","Type":"ContainerStarted","Data":"2674cc02798a69fe517ad17165dabea15895078fe3696ff89dd0a50276ae1289"} Nov 25 10:11:14 crc kubenswrapper[4854]: I1125 10:11:14.381903 4854 generic.go:334] "Generic (PLEG): container finished" podID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerID="c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96" exitCode=0 Nov 25 10:11:14 crc kubenswrapper[4854]: I1125 10:11:14.381947 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ch9sx" event={"ID":"822560d6-dc24-415e-acff-c46ccb3f75c6","Type":"ContainerDied","Data":"c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96"} Nov 25 10:11:15 crc kubenswrapper[4854]: I1125 10:11:15.394578 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ch9sx" 
event={"ID":"822560d6-dc24-415e-acff-c46ccb3f75c6","Type":"ContainerStarted","Data":"827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d"} Nov 25 10:11:15 crc kubenswrapper[4854]: I1125 10:11:15.397094 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" event={"ID":"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10","Type":"ContainerStarted","Data":"7c56baf70300d8597e49071140ac8ccdca178f0b247fe460347e0c9bfed3a220"} Nov 25 10:11:15 crc kubenswrapper[4854]: I1125 10:11:15.419067 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ch9sx" podStartSLOduration=1.9151197290000002 podStartE2EDuration="5.419040758s" podCreationTimestamp="2025-11-25 10:11:10 +0000 UTC" firstStartedPulling="2025-11-25 10:11:11.300530535 +0000 UTC m=+2077.153523911" lastFinishedPulling="2025-11-25 10:11:14.804451564 +0000 UTC m=+2080.657444940" observedRunningTime="2025-11-25 10:11:15.411167112 +0000 UTC m=+2081.264160508" watchObservedRunningTime="2025-11-25 10:11:15.419040758 +0000 UTC m=+2081.272034134" Nov 25 10:11:15 crc kubenswrapper[4854]: I1125 10:11:15.463472 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" podStartSLOduration=1.942598438 podStartE2EDuration="2.463446938s" podCreationTimestamp="2025-11-25 10:11:13 +0000 UTC" firstStartedPulling="2025-11-25 10:11:14.302540933 +0000 UTC m=+2080.155534309" lastFinishedPulling="2025-11-25 10:11:14.823389443 +0000 UTC m=+2080.676382809" observedRunningTime="2025-11-25 10:11:15.444150708 +0000 UTC m=+2081.297144094" watchObservedRunningTime="2025-11-25 10:11:15.463446938 +0000 UTC m=+2081.316440314" Nov 25 10:11:20 crc kubenswrapper[4854]: I1125 10:11:20.439552 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:20 crc kubenswrapper[4854]: I1125 10:11:20.440449 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:20 crc kubenswrapper[4854]: I1125 10:11:20.496135 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:21 crc kubenswrapper[4854]: I1125 10:11:21.554301 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:21 crc kubenswrapper[4854]: I1125 10:11:21.621249 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ch9sx"] Nov 25 10:11:23 crc kubenswrapper[4854]: I1125 10:11:23.501428 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ch9sx" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="registry-server" containerID="cri-o://827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d" gracePeriod=2 Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.095200 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.202986 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-utilities\") pod \"822560d6-dc24-415e-acff-c46ccb3f75c6\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.203384 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-catalog-content\") pod \"822560d6-dc24-415e-acff-c46ccb3f75c6\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.203934 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-utilities" (OuterVolumeSpecName: "utilities") pod "822560d6-dc24-415e-acff-c46ccb3f75c6" (UID: "822560d6-dc24-415e-acff-c46ccb3f75c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.204133 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b74tq\" (UniqueName: \"kubernetes.io/projected/822560d6-dc24-415e-acff-c46ccb3f75c6-kube-api-access-b74tq\") pod \"822560d6-dc24-415e-acff-c46ccb3f75c6\" (UID: \"822560d6-dc24-415e-acff-c46ccb3f75c6\") " Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.205189 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.208641 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/822560d6-dc24-415e-acff-c46ccb3f75c6-kube-api-access-b74tq" (OuterVolumeSpecName: "kube-api-access-b74tq") pod "822560d6-dc24-415e-acff-c46ccb3f75c6" (UID: "822560d6-dc24-415e-acff-c46ccb3f75c6"). InnerVolumeSpecName "kube-api-access-b74tq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.267197 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "822560d6-dc24-415e-acff-c46ccb3f75c6" (UID: "822560d6-dc24-415e-acff-c46ccb3f75c6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.307877 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b74tq\" (UniqueName: \"kubernetes.io/projected/822560d6-dc24-415e-acff-c46ccb3f75c6-kube-api-access-b74tq\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.307939 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/822560d6-dc24-415e-acff-c46ccb3f75c6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.516959 4854 generic.go:334] "Generic (PLEG): container finished" podID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerID="827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d" exitCode=0 Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.517035 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ch9sx" event={"ID":"822560d6-dc24-415e-acff-c46ccb3f75c6","Type":"ContainerDied","Data":"827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d"} Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.517098 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ch9sx" event={"ID":"822560d6-dc24-415e-acff-c46ccb3f75c6","Type":"ContainerDied","Data":"11e2fa7ba2cd6d42e3cc792aba7e6baee0267691be4a9ad584118f1e8c80c3d8"} Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.517122 4854 scope.go:117] "RemoveContainer" containerID="827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.517954 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ch9sx" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.569602 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ch9sx"] Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.571567 4854 scope.go:117] "RemoveContainer" containerID="c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.583001 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ch9sx"] Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.601461 4854 scope.go:117] "RemoveContainer" containerID="89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.673038 4854 scope.go:117] "RemoveContainer" containerID="827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d" Nov 25 10:11:24 crc kubenswrapper[4854]: E1125 10:11:24.673491 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d\": container with ID starting with 827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d not found: ID does not exist" containerID="827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.673537 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d"} err="failed to get container status \"827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d\": rpc error: code = NotFound desc = could not find container \"827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d\": container with ID starting with 827403e7a50b073ae76472f10082f160cd06b9e7eba681d726018ccb1a87488d not found: ID does not exist" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.673568 4854 scope.go:117] "RemoveContainer" containerID="c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96" Nov 25 10:11:24 crc kubenswrapper[4854]: E1125 10:11:24.673936 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96\": container with ID starting with c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96 not found: ID does not exist" containerID="c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.673973 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96"} err="failed to get container status \"c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96\": rpc error: code = NotFound desc = could not find container \"c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96\": container with ID starting with c29a2219714805eeae81cf5451a70d00f28cc6d35441917a70d41a112506aa96 not found: ID does not exist" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.674002 4854 scope.go:117] "RemoveContainer" containerID="89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237" Nov 25 10:11:24 crc kubenswrapper[4854]: E1125 10:11:24.676463 4854 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237\": container with ID starting with 89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237 not found: ID does not exist" containerID="89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237" Nov 25 10:11:24 crc kubenswrapper[4854]: I1125 10:11:24.676515 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237"} err="failed to get container status \"89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237\": rpc error: code = NotFound desc = could not find container \"89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237\": container with ID starting with 89513008a2cd2e24e565d05c4a6a709eeda37590f439c0b11036cb36898ee237 not found: ID does not exist" Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.028806 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.029324 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.034319 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" path="/var/lib/kubelet/pods/822560d6-dc24-415e-acff-c46ccb3f75c6/volumes" Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.036104 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.037602 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c0e53261c1b00dc00042ef24d7f5409ac882efa92a250513a37cff1d9e331ec1"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.037761 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://c0e53261c1b00dc00042ef24d7f5409ac882efa92a250513a37cff1d9e331ec1" gracePeriod=600 Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.531381 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="c0e53261c1b00dc00042ef24d7f5409ac882efa92a250513a37cff1d9e331ec1" exitCode=0 Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.531441 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" 
event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"c0e53261c1b00dc00042ef24d7f5409ac882efa92a250513a37cff1d9e331ec1"} Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.531756 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8"} Nov 25 10:11:25 crc kubenswrapper[4854]: I1125 10:11:25.531781 4854 scope.go:117] "RemoveContainer" containerID="e218b542fd934fd34b157757f419e89c8565fa64cb58598ebd3da742271577ef" Nov 25 10:11:44 crc kubenswrapper[4854]: I1125 10:11:44.057356 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-nl4hd"] Nov 25 10:11:44 crc kubenswrapper[4854]: I1125 10:11:44.069069 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-nl4hd"] Nov 25 10:11:45 crc kubenswrapper[4854]: I1125 10:11:45.035216 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e27e9250-032d-44da-8819-ce560d4f9c3f" path="/var/lib/kubelet/pods/e27e9250-032d-44da-8819-ce560d4f9c3f/volumes" Nov 25 10:11:54 crc kubenswrapper[4854]: I1125 10:11:54.877622 4854 generic.go:334] "Generic (PLEG): container finished" podID="9e84a5bb-da4d-40aa-a555-a63bd8bd3f10" containerID="7c56baf70300d8597e49071140ac8ccdca178f0b247fe460347e0c9bfed3a220" exitCode=0 Nov 25 10:11:54 crc kubenswrapper[4854]: I1125 10:11:54.877814 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" event={"ID":"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10","Type":"ContainerDied","Data":"7c56baf70300d8597e49071140ac8ccdca178f0b247fe460347e0c9bfed3a220"} Nov 25 10:11:55 crc kubenswrapper[4854]: I1125 10:11:55.648162 4854 scope.go:117] "RemoveContainer" containerID="fef0d09ac9cddbbf9f4f7cb64f43adf59990ebece51f482f846184bdc7f7e414" Nov 25 10:11:55 crc kubenswrapper[4854]: I1125 10:11:55.691842 4854 scope.go:117] "RemoveContainer" containerID="3c11e766e45c2c78b8029ebcb599b81e34371c452f7c6fba4eab4be4b6855590" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.405576 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.432277 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-inventory\") pod \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.432601 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rpzh\" (UniqueName: \"kubernetes.io/projected/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-kube-api-access-6rpzh\") pod \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.432745 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-ssh-key\") pod \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\" (UID: \"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10\") " Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.444666 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-kube-api-access-6rpzh" (OuterVolumeSpecName: "kube-api-access-6rpzh") pod "9e84a5bb-da4d-40aa-a555-a63bd8bd3f10" (UID: "9e84a5bb-da4d-40aa-a555-a63bd8bd3f10"). InnerVolumeSpecName "kube-api-access-6rpzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.475790 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-inventory" (OuterVolumeSpecName: "inventory") pod "9e84a5bb-da4d-40aa-a555-a63bd8bd3f10" (UID: "9e84a5bb-da4d-40aa-a555-a63bd8bd3f10"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.483110 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9e84a5bb-da4d-40aa-a555-a63bd8bd3f10" (UID: "9e84a5bb-da4d-40aa-a555-a63bd8bd3f10"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.535757 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.535791 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.535807 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rpzh\" (UniqueName: \"kubernetes.io/projected/9e84a5bb-da4d-40aa-a555-a63bd8bd3f10-kube-api-access-6rpzh\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.902491 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" event={"ID":"9e84a5bb-da4d-40aa-a555-a63bd8bd3f10","Type":"ContainerDied","Data":"2674cc02798a69fe517ad17165dabea15895078fe3696ff89dd0a50276ae1289"} Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.902797 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2674cc02798a69fe517ad17165dabea15895078fe3696ff89dd0a50276ae1289" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.902567 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-499vn" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.997736 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt"] Nov 25 10:11:56 crc kubenswrapper[4854]: E1125 10:11:56.998322 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e84a5bb-da4d-40aa-a555-a63bd8bd3f10" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.998347 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e84a5bb-da4d-40aa-a555-a63bd8bd3f10" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:56 crc kubenswrapper[4854]: E1125 10:11:56.998369 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="extract-content" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.998378 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="extract-content" Nov 25 10:11:56 crc kubenswrapper[4854]: E1125 10:11:56.998418 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="extract-utilities" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.998429 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="extract-utilities" Nov 25 10:11:56 crc kubenswrapper[4854]: E1125 10:11:56.998445 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="registry-server" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.998453 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="registry-server" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.998768 4854 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="822560d6-dc24-415e-acff-c46ccb3f75c6" containerName="registry-server" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.998806 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e84a5bb-da4d-40aa-a555-a63bd8bd3f10" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:11:56 crc kubenswrapper[4854]: I1125 10:11:56.999880 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.004522 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.004897 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.005072 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.005216 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.063717 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt"] Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.068479 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.069288 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bsdw\" (UniqueName: \"kubernetes.io/projected/751a11f6-66a6-4336-81ff-8138ffe4f076-kube-api-access-5bsdw\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.069489 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.171666 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bsdw\" (UniqueName: \"kubernetes.io/projected/751a11f6-66a6-4336-81ff-8138ffe4f076-kube-api-access-5bsdw\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.171774 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-inventory\") pod 
\"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.171836 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.175387 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.182629 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.187884 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bsdw\" (UniqueName: \"kubernetes.io/projected/751a11f6-66a6-4336-81ff-8138ffe4f076-kube-api-access-5bsdw\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-22qxt\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.327982 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:11:57 crc kubenswrapper[4854]: I1125 10:11:57.931307 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt"] Nov 25 10:11:58 crc kubenswrapper[4854]: I1125 10:11:58.940326 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" event={"ID":"751a11f6-66a6-4336-81ff-8138ffe4f076","Type":"ContainerStarted","Data":"820431e17deaa64251d6cbc6c392e81d5de2418181812d5ebaecf951019f93fd"} Nov 25 10:11:58 crc kubenswrapper[4854]: I1125 10:11:58.940627 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" event={"ID":"751a11f6-66a6-4336-81ff-8138ffe4f076","Type":"ContainerStarted","Data":"f83f73ef1cb3138c34e4f8c7444d19fafa3f3972c0d1b875109b8215631beb08"} Nov 25 10:11:58 crc kubenswrapper[4854]: I1125 10:11:58.962070 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" podStartSLOduration=2.400073709 podStartE2EDuration="2.96205341s" podCreationTimestamp="2025-11-25 10:11:56 +0000 UTC" firstStartedPulling="2025-11-25 10:11:57.93903697 +0000 UTC m=+2123.792030346" lastFinishedPulling="2025-11-25 10:11:58.501016681 +0000 UTC m=+2124.354010047" observedRunningTime="2025-11-25 10:11:58.958936944 +0000 UTC m=+2124.811930320" watchObservedRunningTime="2025-11-25 10:11:58.96205341 +0000 UTC m=+2124.815046786" Nov 25 10:12:56 crc kubenswrapper[4854]: I1125 10:12:56.048146 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-hcn8p"] Nov 25 10:12:56 crc kubenswrapper[4854]: I1125 10:12:56.061169 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-hcn8p"] Nov 25 10:12:57 crc kubenswrapper[4854]: I1125 10:12:57.026528 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7909b58c-9614-4859-b5c9-be2fd2c77fc8" path="/var/lib/kubelet/pods/7909b58c-9614-4859-b5c9-be2fd2c77fc8/volumes" Nov 25 10:12:57 crc kubenswrapper[4854]: I1125 10:12:57.783986 4854 generic.go:334] "Generic (PLEG): container finished" podID="751a11f6-66a6-4336-81ff-8138ffe4f076" containerID="820431e17deaa64251d6cbc6c392e81d5de2418181812d5ebaecf951019f93fd" exitCode=0 Nov 25 10:12:57 crc kubenswrapper[4854]: I1125 10:12:57.784039 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" event={"ID":"751a11f6-66a6-4336-81ff-8138ffe4f076","Type":"ContainerDied","Data":"820431e17deaa64251d6cbc6c392e81d5de2418181812d5ebaecf951019f93fd"} Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.327127 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.461791 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-inventory\") pod \"751a11f6-66a6-4336-81ff-8138ffe4f076\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.462308 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-ssh-key\") pod \"751a11f6-66a6-4336-81ff-8138ffe4f076\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.462410 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bsdw\" (UniqueName: \"kubernetes.io/projected/751a11f6-66a6-4336-81ff-8138ffe4f076-kube-api-access-5bsdw\") pod \"751a11f6-66a6-4336-81ff-8138ffe4f076\" (UID: \"751a11f6-66a6-4336-81ff-8138ffe4f076\") " Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.470206 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/751a11f6-66a6-4336-81ff-8138ffe4f076-kube-api-access-5bsdw" (OuterVolumeSpecName: "kube-api-access-5bsdw") pod "751a11f6-66a6-4336-81ff-8138ffe4f076" (UID: "751a11f6-66a6-4336-81ff-8138ffe4f076"). InnerVolumeSpecName "kube-api-access-5bsdw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.495849 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-inventory" (OuterVolumeSpecName: "inventory") pod "751a11f6-66a6-4336-81ff-8138ffe4f076" (UID: "751a11f6-66a6-4336-81ff-8138ffe4f076"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.496183 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "751a11f6-66a6-4336-81ff-8138ffe4f076" (UID: "751a11f6-66a6-4336-81ff-8138ffe4f076"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.567645 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.567712 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bsdw\" (UniqueName: \"kubernetes.io/projected/751a11f6-66a6-4336-81ff-8138ffe4f076-kube-api-access-5bsdw\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.567728 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/751a11f6-66a6-4336-81ff-8138ffe4f076-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.806328 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" event={"ID":"751a11f6-66a6-4336-81ff-8138ffe4f076","Type":"ContainerDied","Data":"f83f73ef1cb3138c34e4f8c7444d19fafa3f3972c0d1b875109b8215631beb08"} Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.806367 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f83f73ef1cb3138c34e4f8c7444d19fafa3f3972c0d1b875109b8215631beb08" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.806800 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-22qxt" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.910794 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8pcrq"] Nov 25 10:12:59 crc kubenswrapper[4854]: E1125 10:12:59.911363 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="751a11f6-66a6-4336-81ff-8138ffe4f076" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.911388 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="751a11f6-66a6-4336-81ff-8138ffe4f076" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.911630 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="751a11f6-66a6-4336-81ff-8138ffe4f076" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.912505 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.914311 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.914469 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.914492 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.914643 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:12:59 crc kubenswrapper[4854]: I1125 10:12:59.922723 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8pcrq"] Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.079153 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.079488 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.079611 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtlfl\" (UniqueName: \"kubernetes.io/projected/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-kube-api-access-rtlfl\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.181529 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.181612 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.181868 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtlfl\" (UniqueName: \"kubernetes.io/projected/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-kube-api-access-rtlfl\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc 
kubenswrapper[4854]: I1125 10:13:00.185203 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.192456 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.202172 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtlfl\" (UniqueName: \"kubernetes.io/projected/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-kube-api-access-rtlfl\") pod \"ssh-known-hosts-edpm-deployment-8pcrq\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.236246 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.851125 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8pcrq"] Nov 25 10:13:00 crc kubenswrapper[4854]: I1125 10:13:00.858595 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:13:01 crc kubenswrapper[4854]: I1125 10:13:01.836994 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" event={"ID":"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a","Type":"ContainerStarted","Data":"efaab2868a5d783d5163ef5b4c4e5b2ca30a49500d0d78da39caf5c6a7c3027e"} Nov 25 10:13:01 crc kubenswrapper[4854]: I1125 10:13:01.837811 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" event={"ID":"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a","Type":"ContainerStarted","Data":"fcb53c0fadcced94a6b326eb43b8eb3a71ec451d47d9570216f227390c51a0bc"} Nov 25 10:13:01 crc kubenswrapper[4854]: I1125 10:13:01.860105 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" podStartSLOduration=2.437230091 podStartE2EDuration="2.860084955s" podCreationTimestamp="2025-11-25 10:12:59 +0000 UTC" firstStartedPulling="2025-11-25 10:13:00.858376971 +0000 UTC m=+2186.711370347" lastFinishedPulling="2025-11-25 10:13:01.281231835 +0000 UTC m=+2187.134225211" observedRunningTime="2025-11-25 10:13:01.856347453 +0000 UTC m=+2187.709340829" watchObservedRunningTime="2025-11-25 10:13:01.860084955 +0000 UTC m=+2187.713078331" Nov 25 10:13:09 crc kubenswrapper[4854]: I1125 10:13:09.935097 4854 generic.go:334] "Generic (PLEG): container finished" podID="ed3268b7-e706-4ec0-9bb9-7e6c86414b2a" containerID="efaab2868a5d783d5163ef5b4c4e5b2ca30a49500d0d78da39caf5c6a7c3027e" exitCode=0 Nov 25 10:13:09 crc kubenswrapper[4854]: I1125 10:13:09.935181 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" 
event={"ID":"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a","Type":"ContainerDied","Data":"efaab2868a5d783d5163ef5b4c4e5b2ca30a49500d0d78da39caf5c6a7c3027e"} Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.488043 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.507557 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-ssh-key-openstack-edpm-ipam\") pod \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.507907 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-inventory-0\") pod \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.508031 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtlfl\" (UniqueName: \"kubernetes.io/projected/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-kube-api-access-rtlfl\") pod \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\" (UID: \"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a\") " Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.513806 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-kube-api-access-rtlfl" (OuterVolumeSpecName: "kube-api-access-rtlfl") pod "ed3268b7-e706-4ec0-9bb9-7e6c86414b2a" (UID: "ed3268b7-e706-4ec0-9bb9-7e6c86414b2a"). InnerVolumeSpecName "kube-api-access-rtlfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.541106 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ed3268b7-e706-4ec0-9bb9-7e6c86414b2a" (UID: "ed3268b7-e706-4ec0-9bb9-7e6c86414b2a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.550418 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "ed3268b7-e706-4ec0-9bb9-7e6c86414b2a" (UID: "ed3268b7-e706-4ec0-9bb9-7e6c86414b2a"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.611009 4854 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.611054 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtlfl\" (UniqueName: \"kubernetes.io/projected/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-kube-api-access-rtlfl\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.611066 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ed3268b7-e706-4ec0-9bb9-7e6c86414b2a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.957565 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" event={"ID":"ed3268b7-e706-4ec0-9bb9-7e6c86414b2a","Type":"ContainerDied","Data":"fcb53c0fadcced94a6b326eb43b8eb3a71ec451d47d9570216f227390c51a0bc"} Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.957605 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcb53c0fadcced94a6b326eb43b8eb3a71ec451d47d9570216f227390c51a0bc" Nov 25 10:13:11 crc kubenswrapper[4854]: I1125 10:13:11.957606 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8pcrq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.085726 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq"] Nov 25 10:13:12 crc kubenswrapper[4854]: E1125 10:13:12.086624 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed3268b7-e706-4ec0-9bb9-7e6c86414b2a" containerName="ssh-known-hosts-edpm-deployment" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.086649 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed3268b7-e706-4ec0-9bb9-7e6c86414b2a" containerName="ssh-known-hosts-edpm-deployment" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.087020 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed3268b7-e706-4ec0-9bb9-7e6c86414b2a" containerName="ssh-known-hosts-edpm-deployment" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.088107 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.090102 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.091801 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.091817 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.093040 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.100569 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq"] Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.123066 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.123212 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmmgq\" (UniqueName: \"kubernetes.io/projected/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-kube-api-access-rmmgq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.123336 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.225954 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.226095 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmmgq\" (UniqueName: \"kubernetes.io/projected/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-kube-api-access-rmmgq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.226163 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.232042 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.232064 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.243395 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmmgq\" (UniqueName: \"kubernetes.io/projected/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-kube-api-access-rmmgq\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-97tkq\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:12 crc kubenswrapper[4854]: I1125 10:13:12.407922 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:13 crc kubenswrapper[4854]: I1125 10:13:13.028992 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq"] Nov 25 10:13:13 crc kubenswrapper[4854]: I1125 10:13:13.979899 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" event={"ID":"0eb416d3-dc11-438b-9f3b-c0660e4c81f8","Type":"ContainerStarted","Data":"533f914401fd2300b920d0643245b10a3f9de0abc6d1df6c9340fecea2544b90"} Nov 25 10:13:13 crc kubenswrapper[4854]: I1125 10:13:13.980230 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" event={"ID":"0eb416d3-dc11-438b-9f3b-c0660e4c81f8","Type":"ContainerStarted","Data":"14339aab4795203d87b769e1f6912e12272e6f6e113a1b267c039484c96cb58b"} Nov 25 10:13:14 crc kubenswrapper[4854]: I1125 10:13:14.007502 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" podStartSLOduration=1.5785315949999998 podStartE2EDuration="2.007483457s" podCreationTimestamp="2025-11-25 10:13:12 +0000 UTC" firstStartedPulling="2025-11-25 10:13:13.018517143 +0000 UTC m=+2198.871510519" lastFinishedPulling="2025-11-25 10:13:13.447469005 +0000 UTC m=+2199.300462381" observedRunningTime="2025-11-25 10:13:13.997655367 +0000 UTC m=+2199.850648743" watchObservedRunningTime="2025-11-25 10:13:14.007483457 +0000 UTC m=+2199.860476833" Nov 25 10:13:23 crc kubenswrapper[4854]: I1125 10:13:23.071003 4854 generic.go:334] "Generic (PLEG): container finished" podID="0eb416d3-dc11-438b-9f3b-c0660e4c81f8" containerID="533f914401fd2300b920d0643245b10a3f9de0abc6d1df6c9340fecea2544b90" exitCode=0 Nov 25 10:13:23 crc kubenswrapper[4854]: I1125 10:13:23.071097 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" 
event={"ID":"0eb416d3-dc11-438b-9f3b-c0660e4c81f8","Type":"ContainerDied","Data":"533f914401fd2300b920d0643245b10a3f9de0abc6d1df6c9340fecea2544b90"} Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.708307 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.842763 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmmgq\" (UniqueName: \"kubernetes.io/projected/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-kube-api-access-rmmgq\") pod \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.843118 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-inventory\") pod \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.843344 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-ssh-key\") pod \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\" (UID: \"0eb416d3-dc11-438b-9f3b-c0660e4c81f8\") " Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.849476 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-kube-api-access-rmmgq" (OuterVolumeSpecName: "kube-api-access-rmmgq") pod "0eb416d3-dc11-438b-9f3b-c0660e4c81f8" (UID: "0eb416d3-dc11-438b-9f3b-c0660e4c81f8"). InnerVolumeSpecName "kube-api-access-rmmgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.885476 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-inventory" (OuterVolumeSpecName: "inventory") pod "0eb416d3-dc11-438b-9f3b-c0660e4c81f8" (UID: "0eb416d3-dc11-438b-9f3b-c0660e4c81f8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.889807 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0eb416d3-dc11-438b-9f3b-c0660e4c81f8" (UID: "0eb416d3-dc11-438b-9f3b-c0660e4c81f8"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.945821 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.945849 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:24.945858 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmmgq\" (UniqueName: \"kubernetes.io/projected/0eb416d3-dc11-438b-9f3b-c0660e4c81f8-kube-api-access-rmmgq\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.029205 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.029273 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.099136 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" event={"ID":"0eb416d3-dc11-438b-9f3b-c0660e4c81f8","Type":"ContainerDied","Data":"14339aab4795203d87b769e1f6912e12272e6f6e113a1b267c039484c96cb58b"} Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.099186 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14339aab4795203d87b769e1f6912e12272e6f6e113a1b267c039484c96cb58b" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.099216 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-97tkq" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.170558 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7"] Nov 25 10:13:25 crc kubenswrapper[4854]: E1125 10:13:25.171299 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eb416d3-dc11-438b-9f3b-c0660e4c81f8" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.171321 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eb416d3-dc11-438b-9f3b-c0660e4c81f8" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.171659 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eb416d3-dc11-438b-9f3b-c0660e4c81f8" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.172814 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.174486 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.175284 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.175568 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.176123 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.186456 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7"] Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.357337 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8c7q\" (UniqueName: \"kubernetes.io/projected/d9434a1f-53bb-45ac-996f-e2e32c7b447f-kube-api-access-g8c7q\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.357439 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.357594 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.460010 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.460177 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8c7q\" (UniqueName: \"kubernetes.io/projected/d9434a1f-53bb-45ac-996f-e2e32c7b447f-kube-api-access-g8c7q\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.460275 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: 
\"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.464500 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.466056 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.478152 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8c7q\" (UniqueName: \"kubernetes.io/projected/d9434a1f-53bb-45ac-996f-e2e32c7b447f-kube-api-access-g8c7q\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:25 crc kubenswrapper[4854]: I1125 10:13:25.500608 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:26 crc kubenswrapper[4854]: I1125 10:13:26.086902 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7"] Nov 25 10:13:26 crc kubenswrapper[4854]: I1125 10:13:26.111136 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" event={"ID":"d9434a1f-53bb-45ac-996f-e2e32c7b447f","Type":"ContainerStarted","Data":"e3ca499cbf8c711862487c8654bb10477d63cde53d00a2e460e3140c31634b64"} Nov 25 10:13:27 crc kubenswrapper[4854]: I1125 10:13:27.121214 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" event={"ID":"d9434a1f-53bb-45ac-996f-e2e32c7b447f","Type":"ContainerStarted","Data":"0640692d2d2eb831934686193cee6d2570a78a5035659ae497982bdfac6bab8f"} Nov 25 10:13:27 crc kubenswrapper[4854]: I1125 10:13:27.141364 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" podStartSLOduration=1.692564647 podStartE2EDuration="2.141347624s" podCreationTimestamp="2025-11-25 10:13:25 +0000 UTC" firstStartedPulling="2025-11-25 10:13:26.084480134 +0000 UTC m=+2211.937473510" lastFinishedPulling="2025-11-25 10:13:26.533263111 +0000 UTC m=+2212.386256487" observedRunningTime="2025-11-25 10:13:27.134625658 +0000 UTC m=+2212.987619044" watchObservedRunningTime="2025-11-25 10:13:27.141347624 +0000 UTC m=+2212.994341000" Nov 25 10:13:37 crc kubenswrapper[4854]: I1125 10:13:37.228081 4854 generic.go:334] "Generic (PLEG): container finished" podID="d9434a1f-53bb-45ac-996f-e2e32c7b447f" containerID="0640692d2d2eb831934686193cee6d2570a78a5035659ae497982bdfac6bab8f" exitCode=0 Nov 25 10:13:37 crc kubenswrapper[4854]: I1125 10:13:37.228164 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" 
event={"ID":"d9434a1f-53bb-45ac-996f-e2e32c7b447f","Type":"ContainerDied","Data":"0640692d2d2eb831934686193cee6d2570a78a5035659ae497982bdfac6bab8f"} Nov 25 10:13:38 crc kubenswrapper[4854]: I1125 10:13:38.709664 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:38 crc kubenswrapper[4854]: I1125 10:13:38.912343 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8c7q\" (UniqueName: \"kubernetes.io/projected/d9434a1f-53bb-45ac-996f-e2e32c7b447f-kube-api-access-g8c7q\") pod \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " Nov 25 10:13:38 crc kubenswrapper[4854]: I1125 10:13:38.912609 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-ssh-key\") pod \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " Nov 25 10:13:38 crc kubenswrapper[4854]: I1125 10:13:38.912730 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-inventory\") pod \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\" (UID: \"d9434a1f-53bb-45ac-996f-e2e32c7b447f\") " Nov 25 10:13:38 crc kubenswrapper[4854]: I1125 10:13:38.918147 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9434a1f-53bb-45ac-996f-e2e32c7b447f-kube-api-access-g8c7q" (OuterVolumeSpecName: "kube-api-access-g8c7q") pod "d9434a1f-53bb-45ac-996f-e2e32c7b447f" (UID: "d9434a1f-53bb-45ac-996f-e2e32c7b447f"). InnerVolumeSpecName "kube-api-access-g8c7q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:13:38 crc kubenswrapper[4854]: I1125 10:13:38.946664 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-inventory" (OuterVolumeSpecName: "inventory") pod "d9434a1f-53bb-45ac-996f-e2e32c7b447f" (UID: "d9434a1f-53bb-45ac-996f-e2e32c7b447f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:13:38 crc kubenswrapper[4854]: I1125 10:13:38.949937 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d9434a1f-53bb-45ac-996f-e2e32c7b447f" (UID: "d9434a1f-53bb-45ac-996f-e2e32c7b447f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.015682 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8c7q\" (UniqueName: \"kubernetes.io/projected/d9434a1f-53bb-45ac-996f-e2e32c7b447f-kube-api-access-g8c7q\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.015714 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.015733 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9434a1f-53bb-45ac-996f-e2e32c7b447f-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.249434 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" event={"ID":"d9434a1f-53bb-45ac-996f-e2e32c7b447f","Type":"ContainerDied","Data":"e3ca499cbf8c711862487c8654bb10477d63cde53d00a2e460e3140c31634b64"} Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.249474 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3ca499cbf8c711862487c8654bb10477d63cde53d00a2e460e3140c31634b64" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.249533 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.340375 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd"] Nov 25 10:13:39 crc kubenswrapper[4854]: E1125 10:13:39.340913 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9434a1f-53bb-45ac-996f-e2e32c7b447f" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.340937 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9434a1f-53bb-45ac-996f-e2e32c7b447f" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.341227 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9434a1f-53bb-45ac-996f-e2e32c7b447f" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.342567 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.344558 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.345278 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.345518 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.345601 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.345648 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.345605 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.345896 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.346941 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.351904 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.352644 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd"] Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532076 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532390 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532441 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 
10:13:39.532476 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532556 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532593 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532725 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532787 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532845 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532904 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.532934 4854 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.533001 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.533045 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.533106 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqv9w\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-kube-api-access-gqv9w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.533212 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.533257 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635207 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635271 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635310 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635334 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635363 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635388 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635425 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqv9w\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-kube-api-access-gqv9w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635475 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635506 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 
25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635567 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635585 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635602 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635619 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635648 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635685 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.635748 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.640542 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.642562 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.644468 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.644771 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.645125 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.646213 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.646659 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.647486 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: 
\"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.649297 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.649935 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.650126 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.651131 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.651743 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.652387 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.653463 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.654469 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqv9w\" 
(UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-kube-api-access-gqv9w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-9tltd\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:39 crc kubenswrapper[4854]: I1125 10:13:39.663192 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:13:40 crc kubenswrapper[4854]: I1125 10:13:40.248788 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd"] Nov 25 10:13:41 crc kubenswrapper[4854]: I1125 10:13:41.268997 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" event={"ID":"7e3d0041-519a-4744-9c38-ae10ebbd0812","Type":"ContainerStarted","Data":"a4aecd318159a4b4b63455070f194fdf577f21f4db5c3a81e58d013dcbad2424"} Nov 25 10:13:41 crc kubenswrapper[4854]: I1125 10:13:41.269488 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" event={"ID":"7e3d0041-519a-4744-9c38-ae10ebbd0812","Type":"ContainerStarted","Data":"7b7df95ad97aa653b19d09955c3db56b23bce32def457747e4a0260ab5c03cf9"} Nov 25 10:13:41 crc kubenswrapper[4854]: I1125 10:13:41.290276 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" podStartSLOduration=1.820962548 podStartE2EDuration="2.290240338s" podCreationTimestamp="2025-11-25 10:13:39 +0000 UTC" firstStartedPulling="2025-11-25 10:13:40.253823221 +0000 UTC m=+2226.106816597" lastFinishedPulling="2025-11-25 10:13:40.723101011 +0000 UTC m=+2226.576094387" observedRunningTime="2025-11-25 10:13:41.285197479 +0000 UTC m=+2227.138190865" watchObservedRunningTime="2025-11-25 10:13:41.290240338 +0000 UTC m=+2227.143233714" Nov 25 10:13:55 crc kubenswrapper[4854]: I1125 10:13:55.029229 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:13:55 crc kubenswrapper[4854]: I1125 10:13:55.029831 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:13:55 crc kubenswrapper[4854]: I1125 10:13:55.058568 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-m27ln"] Nov 25 10:13:55 crc kubenswrapper[4854]: I1125 10:13:55.071850 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-m27ln"] Nov 25 10:13:55 crc kubenswrapper[4854]: I1125 10:13:55.832096 4854 scope.go:117] "RemoveContainer" containerID="58304f7ced76aa1063423fc3490d0ef5b0eab8902366ec37e91c5e35090eda2c" Nov 25 10:13:57 crc kubenswrapper[4854]: I1125 10:13:57.028502 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8be70514-ed7b-499b-acb7-c973bd2590c2" path="/var/lib/kubelet/pods/8be70514-ed7b-499b-acb7-c973bd2590c2/volumes" Nov 25 10:14:25 crc kubenswrapper[4854]: 
I1125 10:14:25.029421 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.030221 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.030492 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.032856 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.033076 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" gracePeriod=600 Nov 25 10:14:25 crc kubenswrapper[4854]: E1125 10:14:25.155179 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.781363 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" exitCode=0 Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.781438 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8"} Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.781792 4854 scope.go:117] "RemoveContainer" containerID="c0e53261c1b00dc00042ef24d7f5409ac882efa92a250513a37cff1d9e331ec1" Nov 25 10:14:25 crc kubenswrapper[4854]: I1125 10:14:25.782270 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:14:25 crc kubenswrapper[4854]: E1125 10:14:25.782718 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
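The failing check above is an HTTP liveness probe against 127.0.0.1:8798/health; after enough consecutive connection-refused results kubelet marks the container unhealthy, kills it with the pod's 600s grace period, and leaves the restart policy to bring it back. A sketch of the probe shape implied by these entries, built with the k8s.io/api types: the host, path, and port come from the log, while the period and threshold are illustrative assumptions that cannot be recovered from this log alone:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Probe reconstructed from the "Liveness probe status=failure" entries:
	// GET http://127.0.0.1:8798/health, failing with "connection refused".
	probe := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1",
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds:    30, // assumption, consistent with the ~30s spacing of the logged failures
		FailureThreshold: 3,  // assumption: the kubelet default
	}
	fmt.Printf("%+v\n", probe)
}
```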
pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:14:31 crc kubenswrapper[4854]: I1125 10:14:31.856619 4854 generic.go:334] "Generic (PLEG): container finished" podID="7e3d0041-519a-4744-9c38-ae10ebbd0812" containerID="a4aecd318159a4b4b63455070f194fdf577f21f4db5c3a81e58d013dcbad2424" exitCode=0 Nov 25 10:14:31 crc kubenswrapper[4854]: I1125 10:14:31.856730 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" event={"ID":"7e3d0041-519a-4744-9c38-ae10ebbd0812","Type":"ContainerDied","Data":"a4aecd318159a4b4b63455070f194fdf577f21f4db5c3a81e58d013dcbad2424"} Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.491156 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578279 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578372 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-power-monitoring-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578404 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ssh-key\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578472 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-ovn-default-certs-0\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578502 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-neutron-metadata-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578557 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578580 4854 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ovn-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578634 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578679 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578698 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqv9w\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-kube-api-access-gqv9w\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578743 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578810 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-libvirt-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578836 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-repo-setup-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578858 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-bootstrap-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578939 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-nova-combined-ca-bundle\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.578988 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-inventory\") pod \"7e3d0041-519a-4744-9c38-ae10ebbd0812\" (UID: \"7e3d0041-519a-4744-9c38-ae10ebbd0812\") " Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.585563 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.586008 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.586216 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.586751 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.586805 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.587104 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.588107 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-kube-api-access-gqv9w" (OuterVolumeSpecName: "kube-api-access-gqv9w") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "kube-api-access-gqv9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.588935 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.589614 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.590886 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.591027 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.591555 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.592264 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.593107 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.621177 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.622521 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-inventory" (OuterVolumeSpecName: "inventory") pod "7e3d0041-519a-4744-9c38-ae10ebbd0812" (UID: "7e3d0041-519a-4744-9c38-ae10ebbd0812"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.680701 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.681444 4854 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682299 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682324 4854 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682365 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682383 4854 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682397 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqv9w\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-kube-api-access-gqv9w\") on node \"crc\" 
DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682466 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682484 4854 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682499 4854 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682540 4854 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682554 4854 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682567 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682580 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7e3d0041-519a-4744-9c38-ae10ebbd0812-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682620 4854 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.682633 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e3d0041-519a-4744-9c38-ae10ebbd0812-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.891477 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" event={"ID":"7e3d0041-519a-4744-9c38-ae10ebbd0812","Type":"ContainerDied","Data":"7b7df95ad97aa653b19d09955c3db56b23bce32def457747e4a0260ab5c03cf9"} Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.891528 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b7df95ad97aa653b19d09955c3db56b23bce32def457747e4a0260ab5c03cf9" Nov 25 10:14:33 crc kubenswrapper[4854]: I1125 10:14:33.891571 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-9tltd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.037183 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd"] Nov 25 10:14:34 crc kubenswrapper[4854]: E1125 10:14:34.037986 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3d0041-519a-4744-9c38-ae10ebbd0812" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.038019 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3d0041-519a-4744-9c38-ae10ebbd0812" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.038424 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e3d0041-519a-4744-9c38-ae10ebbd0812" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.039775 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.041840 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.042128 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.042176 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.042192 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.043205 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.057891 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd"] Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.091335 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.091405 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.091443 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.091582 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.091600 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlbhz\" (UniqueName: \"kubernetes.io/projected/6df14f5b-e51a-4692-8452-03f1cfa9eabb-kube-api-access-mlbhz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.193416 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.193474 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.193598 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.193618 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlbhz\" (UniqueName: \"kubernetes.io/projected/6df14f5b-e51a-4692-8452-03f1cfa9eabb-kube-api-access-mlbhz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.193701 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.194515 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 
10:14:34.198172 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.198369 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.199658 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.213887 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlbhz\" (UniqueName: \"kubernetes.io/projected/6df14f5b-e51a-4692-8452-03f1cfa9eabb-kube-api-access-mlbhz\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-hrlfd\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.368514 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:14:34 crc kubenswrapper[4854]: I1125 10:14:34.919241 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd"] Nov 25 10:14:35 crc kubenswrapper[4854]: I1125 10:14:35.388031 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:14:35 crc kubenswrapper[4854]: I1125 10:14:35.916118 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" event={"ID":"6df14f5b-e51a-4692-8452-03f1cfa9eabb","Type":"ContainerStarted","Data":"266e175ccb3e6e48e5f70f5b7f962262e65831c469e0b892cda587040c60b80b"} Nov 25 10:14:35 crc kubenswrapper[4854]: I1125 10:14:35.916423 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" event={"ID":"6df14f5b-e51a-4692-8452-03f1cfa9eabb","Type":"ContainerStarted","Data":"b88eef82028aeacceccd9a8fdb23153c6f1e4517b06856ba0c945bc38b350c52"} Nov 25 10:14:35 crc kubenswrapper[4854]: I1125 10:14:35.945052 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" podStartSLOduration=1.486800489 podStartE2EDuration="1.945035496s" podCreationTimestamp="2025-11-25 10:14:34 +0000 UTC" firstStartedPulling="2025-11-25 10:14:34.925969355 +0000 UTC m=+2280.778962751" lastFinishedPulling="2025-11-25 10:14:35.384204382 +0000 UTC m=+2281.237197758" observedRunningTime="2025-11-25 10:14:35.933070778 +0000 UTC m=+2281.786064154" watchObservedRunningTime="2025-11-25 10:14:35.945035496 +0000 UTC m=+2281.798028872" Nov 25 10:14:38 crc kubenswrapper[4854]: I1125 10:14:38.014245 4854 scope.go:117] 
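The "SyncLoop (PLEG): event for pod" entries above are kubelet's pod lifecycle event generator relaying container-runtime state changes back into the sync loop. The serialized event={...} payloads have roughly this shape; this is a local mirror inferred from the logged JSON, not an import of kubelet's internal pleg package:

```go
package main

import "fmt"

// Mirror of the event payload printed by the SyncLoop (PLEG) log entries.
type PodLifecycleEvent struct {
	ID   string // pod UID
	Type string // "ContainerStarted", "ContainerDied", ...
	Data string // container ID, or the pod sandbox ID
}

func main() {
	// Values copied from the ovn-edpm-deployment pod's start above.
	events := []PodLifecycleEvent{
		{ID: "6df14f5b-e51a-4692-8452-03f1cfa9eabb", Type: "ContainerStarted",
			Data: "266e175ccb3e6e48e5f70f5b7f962262e65831c469e0b892cda587040c60b80b"},
		{ID: "6df14f5b-e51a-4692-8452-03f1cfa9eabb", Type: "ContainerStarted",
			Data: "b88eef82028aeacceccd9a8fdb23153c6f1e4517b06856ba0c945bc38b350c52"},
	}
	for _, e := range events {
		fmt.Printf("%+v\n", e)
	}
}
```

Each pod start produces a pair of these events, one for the workload container and one for the pod sandbox; the sandbox hash (b88eef82...) reappears at 10:15:43 below when the sandbox itself is reported ContainerDied.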
"RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:14:38 crc kubenswrapper[4854]: E1125 10:14:38.015004 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:14:49 crc kubenswrapper[4854]: I1125 10:14:49.013652 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:14:49 crc kubenswrapper[4854]: E1125 10:14:49.014612 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:14:55 crc kubenswrapper[4854]: I1125 10:14:55.950550 4854 scope.go:117] "RemoveContainer" containerID="69a52e14d54bdf765bafe9eea6ac6093d5656f2f0ca9ac75d706f8305de1b6cc" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.144279 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8"] Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.146885 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.149311 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.149422 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.156353 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8"] Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.274707 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-secret-volume\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.274812 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-config-volume\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.274871 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22b4k\" (UniqueName: 
\"kubernetes.io/projected/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-kube-api-access-22b4k\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.377509 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-secret-volume\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.377589 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-config-volume\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.377697 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22b4k\" (UniqueName: \"kubernetes.io/projected/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-kube-api-access-22b4k\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.379068 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-config-volume\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.383490 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-secret-volume\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.394914 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22b4k\" (UniqueName: \"kubernetes.io/projected/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-kube-api-access-22b4k\") pod \"collect-profiles-29401095-5shv8\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:00 crc kubenswrapper[4854]: I1125 10:15:00.491513 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:01 crc kubenswrapper[4854]: I1125 10:15:01.010774 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8"] Nov 25 10:15:01 crc kubenswrapper[4854]: I1125 10:15:01.214823 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" event={"ID":"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2","Type":"ContainerStarted","Data":"14fe6b743a3fa38bda32b25cc316915c8661ae2e7a011841b835c439022627fe"} Nov 25 10:15:01 crc kubenswrapper[4854]: I1125 10:15:01.214975 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" event={"ID":"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2","Type":"ContainerStarted","Data":"87dfa9f4fa3f2134f76573e3e7758940f199f2cdf6a3be5578b6122f7aaedd04"} Nov 25 10:15:01 crc kubenswrapper[4854]: I1125 10:15:01.232496 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" podStartSLOduration=1.232476633 podStartE2EDuration="1.232476633s" podCreationTimestamp="2025-11-25 10:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:15:01.231808845 +0000 UTC m=+2307.084802231" watchObservedRunningTime="2025-11-25 10:15:01.232476633 +0000 UTC m=+2307.085470009" Nov 25 10:15:02 crc kubenswrapper[4854]: I1125 10:15:02.227650 4854 generic.go:334] "Generic (PLEG): container finished" podID="48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" containerID="14fe6b743a3fa38bda32b25cc316915c8661ae2e7a011841b835c439022627fe" exitCode=0 Nov 25 10:15:02 crc kubenswrapper[4854]: I1125 10:15:02.227734 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" event={"ID":"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2","Type":"ContainerDied","Data":"14fe6b743a3fa38bda32b25cc316915c8661ae2e7a011841b835c439022627fe"} Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.014156 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:15:03 crc kubenswrapper[4854]: E1125 10:15:03.014445 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.644750 4854 util.go:48] "No ready sandbox for pod can be found. 
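collect-profiles-29401095-5shv8 is a CronJob-spawned pod, and the numeric segment is the scheduled tick encoded the way the CronJob controller names its Jobs: minutes since the Unix epoch. A short decode, which recovers exactly the 10:15:00 schedule time at which the SyncLoop ADD above fired:

```go
package main

import (
	"fmt"
	"time"
)

// Decode the scheduled time embedded in a CronJob-created Job name.
func main() {
	const suffix = 29401095 // from collect-profiles-29401095-5shv8
	t := time.Unix(suffix*60, 0).UTC()
	fmt.Println(t) // 2025-11-25 10:15:00 +0000 UTC
}
```

The previous run's pod, collect-profiles-29401050-jck8z (deleted a few entries later), decodes the same way to 09:30:00 the same day, 45 minutes earlier.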
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.770871 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-secret-volume\") pod \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.771168 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-config-volume\") pod \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.771876 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-config-volume" (OuterVolumeSpecName: "config-volume") pod "48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" (UID: "48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.772283 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22b4k\" (UniqueName: \"kubernetes.io/projected/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-kube-api-access-22b4k\") pod \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\" (UID: \"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2\") " Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.773131 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.777731 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" (UID: "48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.778051 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-kube-api-access-22b4k" (OuterVolumeSpecName: "kube-api-access-22b4k") pod "48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" (UID: "48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2"). InnerVolumeSpecName "kube-api-access-22b4k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.875861 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4854]: I1125 10:15:03.876169 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22b4k\" (UniqueName: \"kubernetes.io/projected/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2-kube-api-access-22b4k\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:04 crc kubenswrapper[4854]: I1125 10:15:04.250351 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" event={"ID":"48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2","Type":"ContainerDied","Data":"87dfa9f4fa3f2134f76573e3e7758940f199f2cdf6a3be5578b6122f7aaedd04"} Nov 25 10:15:04 crc kubenswrapper[4854]: I1125 10:15:04.250399 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87dfa9f4fa3f2134f76573e3e7758940f199f2cdf6a3be5578b6122f7aaedd04" Nov 25 10:15:04 crc kubenswrapper[4854]: I1125 10:15:04.250449 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8" Nov 25 10:15:04 crc kubenswrapper[4854]: I1125 10:15:04.316273 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z"] Nov 25 10:15:04 crc kubenswrapper[4854]: I1125 10:15:04.329281 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-jck8z"] Nov 25 10:15:05 crc kubenswrapper[4854]: I1125 10:15:05.034039 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ea5d904-b5da-4a4a-9221-f808841b0052" path="/var/lib/kubelet/pods/7ea5d904-b5da-4a4a-9221-f808841b0052/volumes" Nov 25 10:15:16 crc kubenswrapper[4854]: I1125 10:15:16.013763 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:15:16 crc kubenswrapper[4854]: E1125 10:15:16.015951 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:15:28 crc kubenswrapper[4854]: I1125 10:15:28.013766 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:15:28 crc kubenswrapper[4854]: E1125 10:15:28.014598 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:15:40 crc kubenswrapper[4854]: I1125 10:15:40.014300 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 
10:15:40 crc kubenswrapper[4854]: E1125 10:15:40.015194 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:15:41 crc kubenswrapper[4854]: I1125 10:15:41.650529 4854 generic.go:334] "Generic (PLEG): container finished" podID="6df14f5b-e51a-4692-8452-03f1cfa9eabb" containerID="266e175ccb3e6e48e5f70f5b7f962262e65831c469e0b892cda587040c60b80b" exitCode=0 Nov 25 10:15:41 crc kubenswrapper[4854]: I1125 10:15:41.650708 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" event={"ID":"6df14f5b-e51a-4692-8452-03f1cfa9eabb","Type":"ContainerDied","Data":"266e175ccb3e6e48e5f70f5b7f962262e65831c469e0b892cda587040c60b80b"} Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.161534 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.232833 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovncontroller-config-0\") pod \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.232919 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-inventory\") pod \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.232978 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovn-combined-ca-bundle\") pod \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.233086 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ssh-key\") pod \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.233138 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlbhz\" (UniqueName: \"kubernetes.io/projected/6df14f5b-e51a-4692-8452-03f1cfa9eabb-kube-api-access-mlbhz\") pod \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\" (UID: \"6df14f5b-e51a-4692-8452-03f1cfa9eabb\") " Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.241815 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "6df14f5b-e51a-4692-8452-03f1cfa9eabb" (UID: "6df14f5b-e51a-4692-8452-03f1cfa9eabb"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.251562 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6df14f5b-e51a-4692-8452-03f1cfa9eabb-kube-api-access-mlbhz" (OuterVolumeSpecName: "kube-api-access-mlbhz") pod "6df14f5b-e51a-4692-8452-03f1cfa9eabb" (UID: "6df14f5b-e51a-4692-8452-03f1cfa9eabb"). InnerVolumeSpecName "kube-api-access-mlbhz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.272500 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "6df14f5b-e51a-4692-8452-03f1cfa9eabb" (UID: "6df14f5b-e51a-4692-8452-03f1cfa9eabb"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.275389 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6df14f5b-e51a-4692-8452-03f1cfa9eabb" (UID: "6df14f5b-e51a-4692-8452-03f1cfa9eabb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.277315 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-inventory" (OuterVolumeSpecName: "inventory") pod "6df14f5b-e51a-4692-8452-03f1cfa9eabb" (UID: "6df14f5b-e51a-4692-8452-03f1cfa9eabb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.336294 4854 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.336338 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.336351 4854 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.336361 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6df14f5b-e51a-4692-8452-03f1cfa9eabb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.336375 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlbhz\" (UniqueName: \"kubernetes.io/projected/6df14f5b-e51a-4692-8452-03f1cfa9eabb-kube-api-access-mlbhz\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.674104 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" event={"ID":"6df14f5b-e51a-4692-8452-03f1cfa9eabb","Type":"ContainerDied","Data":"b88eef82028aeacceccd9a8fdb23153c6f1e4517b06856ba0c945bc38b350c52"} Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 
10:15:43.674145 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b88eef82028aeacceccd9a8fdb23153c6f1e4517b06856ba0c945bc38b350c52" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.674196 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-hrlfd" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.777646 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7"] Nov 25 10:15:43 crc kubenswrapper[4854]: E1125 10:15:43.778664 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" containerName="collect-profiles" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.778715 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" containerName="collect-profiles" Nov 25 10:15:43 crc kubenswrapper[4854]: E1125 10:15:43.778738 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6df14f5b-e51a-4692-8452-03f1cfa9eabb" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.778746 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="6df14f5b-e51a-4692-8452-03f1cfa9eabb" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.779027 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" containerName="collect-profiles" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.779063 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="6df14f5b-e51a-4692-8452-03f1cfa9eabb" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.780082 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.787494 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.787737 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.787880 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.788023 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.788179 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.788504 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.789482 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7"] Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.848912 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.849077 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.849110 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.849278 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw8mw\" (UniqueName: \"kubernetes.io/projected/40369967-68d4-4fd5-aa53-432f0c6d4e0f-kube-api-access-rw8mw\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.849353 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.849547 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.951956 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw8mw\" (UniqueName: \"kubernetes.io/projected/40369967-68d4-4fd5-aa53-432f0c6d4e0f-kube-api-access-rw8mw\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.952046 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.952116 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.952223 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.952255 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.952277 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 
25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.956027 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.956030 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.956275 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.959570 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.965275 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:43 crc kubenswrapper[4854]: I1125 10:15:43.971152 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw8mw\" (UniqueName: \"kubernetes.io/projected/40369967-68d4-4fd5-aa53-432f0c6d4e0f-kube-api-access-rw8mw\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:44 crc kubenswrapper[4854]: I1125 10:15:44.106490 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:15:44 crc kubenswrapper[4854]: I1125 10:15:44.764141 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7"] Nov 25 10:15:45 crc kubenswrapper[4854]: I1125 10:15:45.721399 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" event={"ID":"40369967-68d4-4fd5-aa53-432f0c6d4e0f","Type":"ContainerStarted","Data":"40b236767aeb4b2d6b8628a39cdd891d03eff22c278654da491382d745e14fdb"} Nov 25 10:15:45 crc kubenswrapper[4854]: I1125 10:15:45.721721 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" event={"ID":"40369967-68d4-4fd5-aa53-432f0c6d4e0f","Type":"ContainerStarted","Data":"3d1b6b1360e0939e1a78b53dc7db7146d164e532d4f35d3362d8171b506d4a5e"} Nov 25 10:15:45 crc kubenswrapper[4854]: I1125 10:15:45.751374 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" podStartSLOduration=2.25877317 podStartE2EDuration="2.751346439s" podCreationTimestamp="2025-11-25 10:15:43 +0000 UTC" firstStartedPulling="2025-11-25 10:15:44.75491193 +0000 UTC m=+2350.607905306" lastFinishedPulling="2025-11-25 10:15:45.247485199 +0000 UTC m=+2351.100478575" observedRunningTime="2025-11-25 10:15:45.742113145 +0000 UTC m=+2351.595106531" watchObservedRunningTime="2025-11-25 10:15:45.751346439 +0000 UTC m=+2351.604339815" Nov 25 10:15:54 crc kubenswrapper[4854]: I1125 10:15:54.014813 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:15:54 crc kubenswrapper[4854]: E1125 10:15:54.016190 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:15:56 crc kubenswrapper[4854]: I1125 10:15:56.055553 4854 scope.go:117] "RemoveContainer" containerID="40d03eaa09f7fc9dfaaa634461b9a2040747d3879ba2b9a4625a1c97b0923f0d" Nov 25 10:16:05 crc kubenswrapper[4854]: I1125 10:16:05.023587 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:16:05 crc kubenswrapper[4854]: E1125 10:16:05.024661 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:16:19 crc kubenswrapper[4854]: I1125 10:16:19.018026 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:16:19 crc kubenswrapper[4854]: E1125 10:16:19.018941 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:16:32 crc kubenswrapper[4854]: I1125 10:16:32.013437 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:16:32 crc kubenswrapper[4854]: E1125 10:16:32.014213 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:16:35 crc kubenswrapper[4854]: I1125 10:16:35.312818 4854 generic.go:334] "Generic (PLEG): container finished" podID="40369967-68d4-4fd5-aa53-432f0c6d4e0f" containerID="40b236767aeb4b2d6b8628a39cdd891d03eff22c278654da491382d745e14fdb" exitCode=0 Nov 25 10:16:35 crc kubenswrapper[4854]: I1125 10:16:35.313180 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" event={"ID":"40369967-68d4-4fd5-aa53-432f0c6d4e0f","Type":"ContainerDied","Data":"40b236767aeb4b2d6b8628a39cdd891d03eff22c278654da491382d745e14fdb"} Nov 25 10:16:36 crc kubenswrapper[4854]: I1125 10:16:36.847446 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.029284 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-metadata-combined-ca-bundle\") pod \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.029455 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rw8mw\" (UniqueName: \"kubernetes.io/projected/40369967-68d4-4fd5-aa53-432f0c6d4e0f-kube-api-access-rw8mw\") pod \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.029793 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-ssh-key\") pod \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.029858 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-ovn-metadata-agent-neutron-config-0\") pod \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.029906 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-nova-metadata-neutron-config-0\") pod \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.029989 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-inventory\") pod \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\" (UID: \"40369967-68d4-4fd5-aa53-432f0c6d4e0f\") " Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.038641 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "40369967-68d4-4fd5-aa53-432f0c6d4e0f" (UID: "40369967-68d4-4fd5-aa53-432f0c6d4e0f"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.048762 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40369967-68d4-4fd5-aa53-432f0c6d4e0f-kube-api-access-rw8mw" (OuterVolumeSpecName: "kube-api-access-rw8mw") pod "40369967-68d4-4fd5-aa53-432f0c6d4e0f" (UID: "40369967-68d4-4fd5-aa53-432f0c6d4e0f"). InnerVolumeSpecName "kube-api-access-rw8mw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.064020 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "40369967-68d4-4fd5-aa53-432f0c6d4e0f" (UID: "40369967-68d4-4fd5-aa53-432f0c6d4e0f"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.075381 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-inventory" (OuterVolumeSpecName: "inventory") pod "40369967-68d4-4fd5-aa53-432f0c6d4e0f" (UID: "40369967-68d4-4fd5-aa53-432f0c6d4e0f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.076982 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "40369967-68d4-4fd5-aa53-432f0c6d4e0f" (UID: "40369967-68d4-4fd5-aa53-432f0c6d4e0f"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.081129 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "40369967-68d4-4fd5-aa53-432f0c6d4e0f" (UID: "40369967-68d4-4fd5-aa53-432f0c6d4e0f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.134501 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.134551 4854 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.134567 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rw8mw\" (UniqueName: \"kubernetes.io/projected/40369967-68d4-4fd5-aa53-432f0c6d4e0f-kube-api-access-rw8mw\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.134580 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.134594 4854 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.134608 4854 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/40369967-68d4-4fd5-aa53-432f0c6d4e0f-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.338066 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" event={"ID":"40369967-68d4-4fd5-aa53-432f0c6d4e0f","Type":"ContainerDied","Data":"3d1b6b1360e0939e1a78b53dc7db7146d164e532d4f35d3362d8171b506d4a5e"} Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.338113 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d1b6b1360e0939e1a78b53dc7db7146d164e532d4f35d3362d8171b506d4a5e" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.338177 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.451443 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9"] Nov 25 10:16:37 crc kubenswrapper[4854]: E1125 10:16:37.451981 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40369967-68d4-4fd5-aa53-432f0c6d4e0f" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.451998 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="40369967-68d4-4fd5-aa53-432f0c6d4e0f" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.452234 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="40369967-68d4-4fd5-aa53-432f0c6d4e0f" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.453019 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.456038 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.456324 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.456416 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.456512 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.456827 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.475549 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9"] Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.543799 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.543893 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.543952 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.543983 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5xsg\" (UniqueName: \"kubernetes.io/projected/897c33c5-2a69-4163-a965-7ebe1881ce1e-kube-api-access-k5xsg\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.544096 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.646681 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.647016 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.647172 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.647375 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.647508 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5xsg\" (UniqueName: \"kubernetes.io/projected/897c33c5-2a69-4163-a965-7ebe1881ce1e-kube-api-access-k5xsg\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.651252 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.651827 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.652220 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.653207 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-secret-0\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.668276 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5xsg\" (UniqueName: \"kubernetes.io/projected/897c33c5-2a69-4163-a965-7ebe1881ce1e-kube-api-access-k5xsg\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:37 crc kubenswrapper[4854]: I1125 10:16:37.790514 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:16:38 crc kubenswrapper[4854]: I1125 10:16:38.397947 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9"] Nov 25 10:16:39 crc kubenswrapper[4854]: I1125 10:16:39.375634 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" event={"ID":"897c33c5-2a69-4163-a965-7ebe1881ce1e","Type":"ContainerStarted","Data":"6908231b4ffdf17055f6278a3025c4b2c9b065099e1ca333742d1babfca29917"} Nov 25 10:16:40 crc kubenswrapper[4854]: I1125 10:16:40.392028 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" event={"ID":"897c33c5-2a69-4163-a965-7ebe1881ce1e","Type":"ContainerStarted","Data":"7540dd02098b49ef115ece1085100fec63ea7ede2880e6557812634d636ed752"} Nov 25 10:16:40 crc kubenswrapper[4854]: I1125 10:16:40.423654 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" podStartSLOduration=2.2261483 podStartE2EDuration="3.423634856s" podCreationTimestamp="2025-11-25 10:16:37 +0000 UTC" firstStartedPulling="2025-11-25 10:16:38.414660848 +0000 UTC m=+2404.267654214" lastFinishedPulling="2025-11-25 10:16:39.612147394 +0000 UTC m=+2405.465140770" observedRunningTime="2025-11-25 10:16:40.419848251 +0000 UTC m=+2406.272841627" watchObservedRunningTime="2025-11-25 10:16:40.423634856 +0000 UTC m=+2406.276628232" Nov 25 10:16:45 crc kubenswrapper[4854]: I1125 10:16:45.022031 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:16:45 crc kubenswrapper[4854]: E1125 10:16:45.022729 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:16:57 crc kubenswrapper[4854]: I1125 10:16:57.014634 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:16:57 crc kubenswrapper[4854]: E1125 10:16:57.015945 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:17:10 crc kubenswrapper[4854]: I1125 10:17:10.013541 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:17:10 crc kubenswrapper[4854]: E1125 10:17:10.014497 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:17:21 crc kubenswrapper[4854]: I1125 10:17:21.013733 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:17:21 crc kubenswrapper[4854]: E1125 10:17:21.014500 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:17:34 crc kubenswrapper[4854]: I1125 10:17:34.013917 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:17:34 crc kubenswrapper[4854]: E1125 10:17:34.014689 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:17:49 crc kubenswrapper[4854]: I1125 10:17:49.043243 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:17:49 crc kubenswrapper[4854]: E1125 10:17:49.065191 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:18:04 crc kubenswrapper[4854]: I1125 10:18:04.014649 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:18:04 crc kubenswrapper[4854]: E1125 10:18:04.015542 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:18:15 crc kubenswrapper[4854]: I1125 10:18:15.023366 4854 
scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:18:15 crc kubenswrapper[4854]: E1125 10:18:15.024495 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:18:28 crc kubenswrapper[4854]: I1125 10:18:28.014347 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:18:28 crc kubenswrapper[4854]: E1125 10:18:28.015079 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:18:43 crc kubenswrapper[4854]: I1125 10:18:43.014874 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:18:43 crc kubenswrapper[4854]: E1125 10:18:43.015921 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:18:55 crc kubenswrapper[4854]: I1125 10:18:55.028101 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:18:55 crc kubenswrapper[4854]: E1125 10:18:55.029520 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:19:08 crc kubenswrapper[4854]: I1125 10:19:08.014493 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:19:08 crc kubenswrapper[4854]: E1125 10:19:08.016096 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:19:19 crc kubenswrapper[4854]: I1125 10:19:19.014355 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:19:19 crc kubenswrapper[4854]: E1125 10:19:19.015273 4854 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.186574 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4fzbl"] Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.190113 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.203222 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4fzbl"] Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.353475 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-utilities\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.353548 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-catalog-content\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.355993 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89zld\" (UniqueName: \"kubernetes.io/projected/480daa48-5e27-41a4-b4f1-426e64a0654c-kube-api-access-89zld\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.458689 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-utilities\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.458962 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-catalog-content\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.459039 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89zld\" (UniqueName: \"kubernetes.io/projected/480daa48-5e27-41a4-b4f1-426e64a0654c-kube-api-access-89zld\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.459315 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-utilities\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.459395 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-catalog-content\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.486527 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89zld\" (UniqueName: \"kubernetes.io/projected/480daa48-5e27-41a4-b4f1-426e64a0654c-kube-api-access-89zld\") pod \"redhat-operators-4fzbl\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") " pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:33 crc kubenswrapper[4854]: I1125 10:19:33.513391 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:34 crc kubenswrapper[4854]: I1125 10:19:34.014639 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:19:34 crc kubenswrapper[4854]: I1125 10:19:34.049129 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4fzbl"] Nov 25 10:19:34 crc kubenswrapper[4854]: I1125 10:19:34.483633 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"d4dce61a59fd748b9095cb222d9a2995882c94f0f1c60c5b9043718eded2f02b"} Nov 25 10:19:34 crc kubenswrapper[4854]: I1125 10:19:34.490621 4854 generic.go:334] "Generic (PLEG): container finished" podID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerID="7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111" exitCode=0 Nov 25 10:19:34 crc kubenswrapper[4854]: I1125 10:19:34.490739 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fzbl" event={"ID":"480daa48-5e27-41a4-b4f1-426e64a0654c","Type":"ContainerDied","Data":"7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111"} Nov 25 10:19:34 crc kubenswrapper[4854]: I1125 10:19:34.490766 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fzbl" event={"ID":"480daa48-5e27-41a4-b4f1-426e64a0654c","Type":"ContainerStarted","Data":"d95d985ccecf315b9d7d185603971b026e75cd8ab03214493af7b12c487c7f49"} Nov 25 10:19:34 crc kubenswrapper[4854]: I1125 10:19:34.496156 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:19:35 crc kubenswrapper[4854]: I1125 10:19:35.508251 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fzbl" event={"ID":"480daa48-5e27-41a4-b4f1-426e64a0654c","Type":"ContainerStarted","Data":"57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac"} Nov 25 10:19:40 crc kubenswrapper[4854]: I1125 10:19:40.564284 4854 generic.go:334] "Generic (PLEG): container finished" podID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerID="57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac" exitCode=0 Nov 25 10:19:40 crc kubenswrapper[4854]: I1125 
10:19:40.564349 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fzbl" event={"ID":"480daa48-5e27-41a4-b4f1-426e64a0654c","Type":"ContainerDied","Data":"57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac"} Nov 25 10:19:41 crc kubenswrapper[4854]: I1125 10:19:41.576734 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fzbl" event={"ID":"480daa48-5e27-41a4-b4f1-426e64a0654c","Type":"ContainerStarted","Data":"774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de"} Nov 25 10:19:41 crc kubenswrapper[4854]: I1125 10:19:41.608666 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4fzbl" podStartSLOduration=2.051977003 podStartE2EDuration="8.608641353s" podCreationTimestamp="2025-11-25 10:19:33 +0000 UTC" firstStartedPulling="2025-11-25 10:19:34.495861787 +0000 UTC m=+2580.348855163" lastFinishedPulling="2025-11-25 10:19:41.052526137 +0000 UTC m=+2586.905519513" observedRunningTime="2025-11-25 10:19:41.595552974 +0000 UTC m=+2587.448546350" watchObservedRunningTime="2025-11-25 10:19:41.608641353 +0000 UTC m=+2587.461634729" Nov 25 10:19:43 crc kubenswrapper[4854]: I1125 10:19:43.513966 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:43 crc kubenswrapper[4854]: I1125 10:19:43.514310 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:44 crc kubenswrapper[4854]: I1125 10:19:44.565913 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4fzbl" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="registry-server" probeResult="failure" output=< Nov 25 10:19:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 10:19:44 crc kubenswrapper[4854]: > Nov 25 10:19:53 crc kubenswrapper[4854]: I1125 10:19:53.567371 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:53 crc kubenswrapper[4854]: I1125 10:19:53.621856 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4fzbl" Nov 25 10:19:53 crc kubenswrapper[4854]: I1125 10:19:53.803840 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4fzbl"] Nov 25 10:19:54 crc kubenswrapper[4854]: I1125 10:19:54.742301 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4fzbl" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="registry-server" containerID="cri-o://774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de" gracePeriod=2 Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.295082 4854 util.go:48] "No ready sandbox for pod can be found. 
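The startup-probe failure above ("timeout: failed to connect service \":50051\" within 1s") is a gRPC health check against the catalog pod's registry-server port. A hedged sketch of an equivalent check in Go using the standard grpc.health.v1 service; the ":50051" address and 1s timeout come from the log output, while the use of google.golang.org/grpc here is an illustrative assumption, not the probe binary actually shipped in the image.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Fail the probe if we cannot connect and get an answer within 1s.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock())
	if err != nil {
		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
		return
	}
	defer conn.Close()

	// Ask the standard grpc.health.v1 service whether it is SERVING.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil || resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
		fmt.Println("probe failed: service not SERVING")
		return
	}
	fmt.Println("probe ok")
}

Once the registry-server finishes loading its catalog, the same check succeeds, which matches the probe="startup" status="started" and probe="readiness" status="ready" transitions logged at 10:19:53.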
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.470626 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89zld\" (UniqueName: \"kubernetes.io/projected/480daa48-5e27-41a4-b4f1-426e64a0654c-kube-api-access-89zld\") pod \"480daa48-5e27-41a4-b4f1-426e64a0654c\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") "
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.470721 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-catalog-content\") pod \"480daa48-5e27-41a4-b4f1-426e64a0654c\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") "
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.470984 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-utilities\") pod \"480daa48-5e27-41a4-b4f1-426e64a0654c\" (UID: \"480daa48-5e27-41a4-b4f1-426e64a0654c\") "
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.471614 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-utilities" (OuterVolumeSpecName: "utilities") pod "480daa48-5e27-41a4-b4f1-426e64a0654c" (UID: "480daa48-5e27-41a4-b4f1-426e64a0654c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.471800 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.486039 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/480daa48-5e27-41a4-b4f1-426e64a0654c-kube-api-access-89zld" (OuterVolumeSpecName: "kube-api-access-89zld") pod "480daa48-5e27-41a4-b4f1-426e64a0654c" (UID: "480daa48-5e27-41a4-b4f1-426e64a0654c"). InnerVolumeSpecName "kube-api-access-89zld". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.570228 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "480daa48-5e27-41a4-b4f1-426e64a0654c" (UID: "480daa48-5e27-41a4-b4f1-426e64a0654c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.574694 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89zld\" (UniqueName: \"kubernetes.io/projected/480daa48-5e27-41a4-b4f1-426e64a0654c-kube-api-access-89zld\") on node \"crc\" DevicePath \"\""
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.574741 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480daa48-5e27-41a4-b4f1-426e64a0654c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.778342 4854 generic.go:334] "Generic (PLEG): container finished" podID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerID="774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de" exitCode=0
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.778390 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fzbl" event={"ID":"480daa48-5e27-41a4-b4f1-426e64a0654c","Type":"ContainerDied","Data":"774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de"}
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.778416 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fzbl" event={"ID":"480daa48-5e27-41a4-b4f1-426e64a0654c","Type":"ContainerDied","Data":"d95d985ccecf315b9d7d185603971b026e75cd8ab03214493af7b12c487c7f49"}
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.778432 4854 scope.go:117] "RemoveContainer" containerID="774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.780847 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4fzbl"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.800466 4854 scope.go:117] "RemoveContainer" containerID="57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.833620 4854 scope.go:117] "RemoveContainer" containerID="7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.835159 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4fzbl"]
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.846011 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4fzbl"]
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.905525 4854 scope.go:117] "RemoveContainer" containerID="774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de"
Nov 25 10:19:55 crc kubenswrapper[4854]: E1125 10:19:55.906378 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de\": container with ID starting with 774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de not found: ID does not exist" containerID="774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.906424 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de"} err="failed to get container status \"774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de\": rpc error: code = NotFound desc = could not find container \"774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de\": container with ID starting with 774514b69661de0a4a5174a503bf1fb392c37f8668fb15d8ca941120bc3951de not found: ID does not exist"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.906451 4854 scope.go:117] "RemoveContainer" containerID="57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac"
Nov 25 10:19:55 crc kubenswrapper[4854]: E1125 10:19:55.907324 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac\": container with ID starting with 57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac not found: ID does not exist" containerID="57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.907374 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac"} err="failed to get container status \"57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac\": rpc error: code = NotFound desc = could not find container \"57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac\": container with ID starting with 57b9dd54df67354260b60e8635a62b660f18ac6755968c7c3810a6a245143bac not found: ID does not exist"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.907404 4854 scope.go:117] "RemoveContainer" containerID="7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111"
Nov 25 10:19:55 crc kubenswrapper[4854]: E1125 10:19:55.907912 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111\": container with ID starting with 7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111 not found: ID does not exist" containerID="7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111"
Nov 25 10:19:55 crc kubenswrapper[4854]: I1125 10:19:55.907945 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111"} err="failed to get container status \"7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111\": rpc error: code = NotFound desc = could not find container \"7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111\": container with ID starting with 7112297bdce58905f4e1d1fc8c432608e42c56563361b23d56719a785043f111 not found: ID does not exist"
Nov 25 10:19:57 crc kubenswrapper[4854]: I1125 10:19:57.026307 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" path="/var/lib/kubelet/pods/480daa48-5e27-41a4-b4f1-426e64a0654c/volumes"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.122034 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qtvl4"]
Nov 25 10:20:21 crc kubenswrapper[4854]: E1125 10:20:21.124460 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="extract-utilities"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.124572 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="extract-utilities"
Nov 25 10:20:21 crc kubenswrapper[4854]: E1125 10:20:21.124716 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="registry-server"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.124803 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="registry-server"
Nov 25 10:20:21 crc kubenswrapper[4854]: E1125 10:20:21.124915 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="extract-content"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.124996 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="extract-content"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.125391 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="480daa48-5e27-41a4-b4f1-426e64a0654c" containerName="registry-server"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.127813 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.141593 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qtvl4"]
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.247795 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-catalog-content\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.248506 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8rgd\" (UniqueName: \"kubernetes.io/projected/faf48b6a-fafa-4388-a425-d1a0fe191e96-kube-api-access-x8rgd\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.248611 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-utilities\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.350500 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-catalog-content\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.351006 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-catalog-content\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.351157 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8rgd\" (UniqueName: \"kubernetes.io/projected/faf48b6a-fafa-4388-a425-d1a0fe191e96-kube-api-access-x8rgd\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.351207 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-utilities\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.351740 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-utilities\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.372727 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8rgd\" (UniqueName: \"kubernetes.io/projected/faf48b6a-fafa-4388-a425-d1a0fe191e96-kube-api-access-x8rgd\") pod \"community-operators-qtvl4\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:21 crc kubenswrapper[4854]: I1125 10:20:21.467498 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qtvl4"
Nov 25 10:20:22 crc kubenswrapper[4854]: I1125 10:20:22.050893 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qtvl4"]
Nov 25 10:20:23 crc kubenswrapper[4854]: I1125 10:20:23.075044 4854 generic.go:334] "Generic (PLEG): container finished" podID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerID="409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726" exitCode=0
Nov 25 10:20:23 crc kubenswrapper[4854]: I1125 10:20:23.075183 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qtvl4" event={"ID":"faf48b6a-fafa-4388-a425-d1a0fe191e96","Type":"ContainerDied","Data":"409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726"}
Nov 25 10:20:23 crc kubenswrapper[4854]: I1125 10:20:23.075619 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qtvl4" event={"ID":"faf48b6a-fafa-4388-a425-d1a0fe191e96","Type":"ContainerStarted","Data":"da09464aaa3bfaddcb516356f68122481b177a9a33a53c730513ad1e45bd89e8"}
Nov 25 10:20:24 crc kubenswrapper[4854]: I1125 10:20:24.087448 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qtvl4" event={"ID":"faf48b6a-fafa-4388-a425-d1a0fe191e96","Type":"ContainerStarted","Data":"334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7"}
Nov 25 10:20:25 crc kubenswrapper[4854]: I1125 10:20:25.099799 4854 generic.go:334] "Generic (PLEG): container finished" podID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerID="334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7" exitCode=0
Nov 25 10:20:25 crc kubenswrapper[4854]: I1125 10:20:25.099862 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qtvl4" event={"ID":"faf48b6a-fafa-4388-a425-d1a0fe191e96","Type":"ContainerDied","Data":"334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7"}
Nov 25 10:20:26 crc kubenswrapper[4854]: I1125 10:20:26.113549 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qtvl4" event={"ID":"faf48b6a-fafa-4388-a425-d1a0fe191e96","Type":"ContainerStarted","Data":"306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54"}
Nov 25 10:20:26 crc kubenswrapper[4854]: I1125 10:20:26.145931 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qtvl4" podStartSLOduration=2.690650536 podStartE2EDuration="5.145908321s" podCreationTimestamp="2025-11-25 10:20:21 +0000 UTC" firstStartedPulling="2025-11-25 10:20:23.078048842 +0000 UTC m=+2628.931042218" lastFinishedPulling="2025-11-25 10:20:25.533306627 +0000 UTC m=+2631.386300003" observedRunningTime="2025-11-25 10:20:26.136156893 +0000 UTC m=+2631.989150269" watchObservedRunningTime="2025-11-25 10:20:26.145908321 +0000 UTC m=+2631.998901687"
Nov 25 10:20:31 crc kubenswrapper[4854]: I1125 10:20:31.468497 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qtvl4"
status="" pod="openshift-marketplace/community-operators-qtvl4" Nov 25 10:20:31 crc kubenswrapper[4854]: I1125 10:20:31.469135 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qtvl4" Nov 25 10:20:31 crc kubenswrapper[4854]: I1125 10:20:31.516632 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qtvl4" Nov 25 10:20:32 crc kubenswrapper[4854]: I1125 10:20:32.224271 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qtvl4" Nov 25 10:20:32 crc kubenswrapper[4854]: I1125 10:20:32.287358 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qtvl4"] Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.192131 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qtvl4" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="registry-server" containerID="cri-o://306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54" gracePeriod=2 Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.756116 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qtvl4" Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.932422 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-utilities\") pod \"faf48b6a-fafa-4388-a425-d1a0fe191e96\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.932727 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8rgd\" (UniqueName: \"kubernetes.io/projected/faf48b6a-fafa-4388-a425-d1a0fe191e96-kube-api-access-x8rgd\") pod \"faf48b6a-fafa-4388-a425-d1a0fe191e96\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.932918 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-catalog-content\") pod \"faf48b6a-fafa-4388-a425-d1a0fe191e96\" (UID: \"faf48b6a-fafa-4388-a425-d1a0fe191e96\") " Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.933369 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-utilities" (OuterVolumeSpecName: "utilities") pod "faf48b6a-fafa-4388-a425-d1a0fe191e96" (UID: "faf48b6a-fafa-4388-a425-d1a0fe191e96"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.933999 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:34 crc kubenswrapper[4854]: I1125 10:20:34.956953 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/faf48b6a-fafa-4388-a425-d1a0fe191e96-kube-api-access-x8rgd" (OuterVolumeSpecName: "kube-api-access-x8rgd") pod "faf48b6a-fafa-4388-a425-d1a0fe191e96" (UID: "faf48b6a-fafa-4388-a425-d1a0fe191e96"). InnerVolumeSpecName "kube-api-access-x8rgd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.039335 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8rgd\" (UniqueName: \"kubernetes.io/projected/faf48b6a-fafa-4388-a425-d1a0fe191e96-kube-api-access-x8rgd\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.205665 4854 generic.go:334] "Generic (PLEG): container finished" podID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerID="306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54" exitCode=0 Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.205779 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qtvl4" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.205938 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qtvl4" event={"ID":"faf48b6a-fafa-4388-a425-d1a0fe191e96","Type":"ContainerDied","Data":"306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54"} Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.206143 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qtvl4" event={"ID":"faf48b6a-fafa-4388-a425-d1a0fe191e96","Type":"ContainerDied","Data":"da09464aaa3bfaddcb516356f68122481b177a9a33a53c730513ad1e45bd89e8"} Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.206174 4854 scope.go:117] "RemoveContainer" containerID="306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.229483 4854 scope.go:117] "RemoveContainer" containerID="334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.261884 4854 scope.go:117] "RemoveContainer" containerID="409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.315140 4854 scope.go:117] "RemoveContainer" containerID="306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54" Nov 25 10:20:35 crc kubenswrapper[4854]: E1125 10:20:35.315949 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54\": container with ID starting with 306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54 not found: ID does not exist" containerID="306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.316027 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54"} err="failed to get container status \"306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54\": rpc error: code = NotFound desc = could not find container \"306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54\": container with ID starting with 306558f98c4d2690f708f8f0de1ea4e42a6763406b91d072a5548ae8d1be5d54 not found: ID does not exist" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.316063 4854 scope.go:117] "RemoveContainer" containerID="334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7" Nov 25 10:20:35 crc kubenswrapper[4854]: E1125 10:20:35.316423 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7\": container with ID starting with 334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7 not found: ID does not exist" containerID="334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.316474 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7"} err="failed to get container status \"334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7\": rpc error: code = NotFound desc = could not find container \"334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7\": container with ID starting with 334f922e1251655f1fa38e2ff43326403b41f1a7fa29cec6957a42ef4c378fa7 not found: ID does not exist" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.316504 4854 scope.go:117] "RemoveContainer" containerID="409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726" Nov 25 10:20:35 crc kubenswrapper[4854]: E1125 10:20:35.316812 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726\": container with ID starting with 409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726 not found: ID does not exist" containerID="409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.316848 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726"} err="failed to get container status \"409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726\": rpc error: code = NotFound desc = could not find container \"409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726\": container with ID starting with 409022fad167487362338f5e9ce61212b08db434e2e6a490767d20bb924e2726 not found: ID does not exist" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.735267 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "faf48b6a-fafa-4388-a425-d1a0fe191e96" (UID: "faf48b6a-fafa-4388-a425-d1a0fe191e96"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.756557 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/faf48b6a-fafa-4388-a425-d1a0fe191e96-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.847879 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qtvl4"] Nov 25 10:20:35 crc kubenswrapper[4854]: I1125 10:20:35.859987 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qtvl4"] Nov 25 10:20:37 crc kubenswrapper[4854]: I1125 10:20:37.041646 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" path="/var/lib/kubelet/pods/faf48b6a-fafa-4388-a425-d1a0fe191e96/volumes" Nov 25 10:21:01 crc kubenswrapper[4854]: I1125 10:21:01.566231 4854 generic.go:334] "Generic (PLEG): container finished" podID="897c33c5-2a69-4163-a965-7ebe1881ce1e" containerID="7540dd02098b49ef115ece1085100fec63ea7ede2880e6557812634d636ed752" exitCode=0 Nov 25 10:21:01 crc kubenswrapper[4854]: I1125 10:21:01.566305 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" event={"ID":"897c33c5-2a69-4163-a965-7ebe1881ce1e","Type":"ContainerDied","Data":"7540dd02098b49ef115ece1085100fec63ea7ede2880e6557812634d636ed752"} Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.139015 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.244377 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-secret-0\") pod \"897c33c5-2a69-4163-a965-7ebe1881ce1e\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.244423 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-ssh-key\") pod \"897c33c5-2a69-4163-a965-7ebe1881ce1e\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.244532 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-combined-ca-bundle\") pod \"897c33c5-2a69-4163-a965-7ebe1881ce1e\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.244688 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5xsg\" (UniqueName: \"kubernetes.io/projected/897c33c5-2a69-4163-a965-7ebe1881ce1e-kube-api-access-k5xsg\") pod \"897c33c5-2a69-4163-a965-7ebe1881ce1e\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.244830 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-inventory\") pod \"897c33c5-2a69-4163-a965-7ebe1881ce1e\" (UID: \"897c33c5-2a69-4163-a965-7ebe1881ce1e\") " Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 
10:21:03.250378 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "897c33c5-2a69-4163-a965-7ebe1881ce1e" (UID: "897c33c5-2a69-4163-a965-7ebe1881ce1e"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.251238 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/897c33c5-2a69-4163-a965-7ebe1881ce1e-kube-api-access-k5xsg" (OuterVolumeSpecName: "kube-api-access-k5xsg") pod "897c33c5-2a69-4163-a965-7ebe1881ce1e" (UID: "897c33c5-2a69-4163-a965-7ebe1881ce1e"). InnerVolumeSpecName "kube-api-access-k5xsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.310665 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-inventory" (OuterVolumeSpecName: "inventory") pod "897c33c5-2a69-4163-a965-7ebe1881ce1e" (UID: "897c33c5-2a69-4163-a965-7ebe1881ce1e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.331106 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "897c33c5-2a69-4163-a965-7ebe1881ce1e" (UID: "897c33c5-2a69-4163-a965-7ebe1881ce1e"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.331929 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "897c33c5-2a69-4163-a965-7ebe1881ce1e" (UID: "897c33c5-2a69-4163-a965-7ebe1881ce1e"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.353641 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.353688 4854 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.353700 4854 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.353712 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5xsg\" (UniqueName: \"kubernetes.io/projected/897c33c5-2a69-4163-a965-7ebe1881ce1e-kube-api-access-k5xsg\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.353720 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/897c33c5-2a69-4163-a965-7ebe1881ce1e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.621036 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" event={"ID":"897c33c5-2a69-4163-a965-7ebe1881ce1e","Type":"ContainerDied","Data":"6908231b4ffdf17055f6278a3025c4b2c9b065099e1ca333742d1babfca29917"} Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.621084 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6908231b4ffdf17055f6278a3025c4b2c9b065099e1ca333742d1babfca29917" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.621107 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.701223 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r"] Nov 25 10:21:03 crc kubenswrapper[4854]: E1125 10:21:03.702218 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="extract-utilities" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.702243 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="extract-utilities" Nov 25 10:21:03 crc kubenswrapper[4854]: E1125 10:21:03.702267 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="extract-content" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.702275 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="extract-content" Nov 25 10:21:03 crc kubenswrapper[4854]: E1125 10:21:03.702287 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="897c33c5-2a69-4163-a965-7ebe1881ce1e" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.702296 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="897c33c5-2a69-4163-a965-7ebe1881ce1e" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 10:21:03 crc kubenswrapper[4854]: E1125 10:21:03.702372 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="registry-server" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.702384 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="registry-server" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.702700 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="897c33c5-2a69-4163-a965-7ebe1881ce1e" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.702743 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="faf48b6a-fafa-4388-a425-d1a0fe191e96" containerName="registry-server" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.704032 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.706840 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.709962 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.710242 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.710411 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.710541 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.710650 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.710603 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.725992 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r"] Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.773569 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.773631 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.773793 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.773859 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sh44\" (UniqueName: \"kubernetes.io/projected/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-kube-api-access-8sh44\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.774020 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: 
\"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.774151 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.774205 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.774246 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.774455 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877239 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877284 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877324 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877359 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sh44\" (UniqueName: 
\"kubernetes.io/projected/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-kube-api-access-8sh44\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877391 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877462 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877489 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877515 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.877597 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.879946 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.882987 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.885596 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.894824 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.895007 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.895248 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.895266 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.898460 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:03 crc kubenswrapper[4854]: I1125 10:21:03.898722 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sh44\" (UniqueName: \"kubernetes.io/projected/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-kube-api-access-8sh44\") pod \"nova-edpm-deployment-openstack-edpm-ipam-dlf9r\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:04 crc kubenswrapper[4854]: I1125 10:21:04.080599 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:21:04 crc kubenswrapper[4854]: I1125 10:21:04.748539 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r"] Nov 25 10:21:05 crc kubenswrapper[4854]: I1125 10:21:05.646093 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" event={"ID":"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047","Type":"ContainerStarted","Data":"02dcbac63989bde09ea3a3d95941a7f8bcd2bc75d20be6873da308278a857bac"} Nov 25 10:21:06 crc kubenswrapper[4854]: I1125 10:21:06.659760 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" event={"ID":"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047","Type":"ContainerStarted","Data":"719a29e5f0ab01d469290e6baabe7163a2e8d615c86d16f18dd20bbeb4e027d2"} Nov 25 10:21:06 crc kubenswrapper[4854]: I1125 10:21:06.723963 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" podStartSLOduration=2.826404529 podStartE2EDuration="3.723944619s" podCreationTimestamp="2025-11-25 10:21:03 +0000 UTC" firstStartedPulling="2025-11-25 10:21:04.757993039 +0000 UTC m=+2670.610986415" lastFinishedPulling="2025-11-25 10:21:05.655533129 +0000 UTC m=+2671.508526505" observedRunningTime="2025-11-25 10:21:06.714531131 +0000 UTC m=+2672.567524517" watchObservedRunningTime="2025-11-25 10:21:06.723944619 +0000 UTC m=+2672.576937995" Nov 25 10:21:55 crc kubenswrapper[4854]: I1125 10:21:55.029741 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:21:55 crc kubenswrapper[4854]: I1125 10:21:55.030277 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:22:25 crc kubenswrapper[4854]: I1125 10:22:25.036074 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:22:25 crc kubenswrapper[4854]: I1125 10:22:25.036858 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.042240 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.043899 4854 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.044010 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.044966 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d4dce61a59fd748b9095cb222d9a2995882c94f0f1c60c5b9043718eded2f02b"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.045133 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://d4dce61a59fd748b9095cb222d9a2995882c94f0f1c60c5b9043718eded2f02b" gracePeriod=600 Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.473464 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="d4dce61a59fd748b9095cb222d9a2995882c94f0f1c60c5b9043718eded2f02b" exitCode=0 Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.473558 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"d4dce61a59fd748b9095cb222d9a2995882c94f0f1c60c5b9043718eded2f02b"} Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.473855 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"} Nov 25 10:22:55 crc kubenswrapper[4854]: I1125 10:22:55.473879 4854 scope.go:117] "RemoveContainer" containerID="087635fd301f31cb709ca3e31dcfb761f6ed178214d846b50fc60a19c19a65a8" Nov 25 10:23:57 crc kubenswrapper[4854]: I1125 10:23:57.154144 4854 generic.go:334] "Generic (PLEG): container finished" podID="a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" containerID="719a29e5f0ab01d469290e6baabe7163a2e8d615c86d16f18dd20bbeb4e027d2" exitCode=0 Nov 25 10:23:57 crc kubenswrapper[4854]: I1125 10:23:57.154232 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" event={"ID":"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047","Type":"ContainerDied","Data":"719a29e5f0ab01d469290e6baabe7163a2e8d615c86d16f18dd20bbeb4e027d2"} Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.732392 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772125 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-ssh-key\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772293 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-inventory\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772381 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-1\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772416 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sh44\" (UniqueName: \"kubernetes.io/projected/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-kube-api-access-8sh44\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772460 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-combined-ca-bundle\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772503 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-0\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772556 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-0\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772750 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-extra-config-0\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.772808 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-1\") pod \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\" (UID: \"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047\") " Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.780251 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.791170 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-kube-api-access-8sh44" (OuterVolumeSpecName: "kube-api-access-8sh44") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "kube-api-access-8sh44". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.809147 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-inventory" (OuterVolumeSpecName: "inventory") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.812484 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.812507 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.819344 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.820415 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.822983 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.839555 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" (UID: "a73a64c8-e9bd-44ce-a844-a7ab9b1c3047"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876712 4854 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876752 4854 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876765 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876774 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876782 4854 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876791 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sh44\" (UniqueName: \"kubernetes.io/projected/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-kube-api-access-8sh44\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876805 4854 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876821 4854 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:58 crc kubenswrapper[4854]: I1125 10:23:58.876833 4854 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a73a64c8-e9bd-44ce-a844-a7ab9b1c3047-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.181630 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" event={"ID":"a73a64c8-e9bd-44ce-a844-a7ab9b1c3047","Type":"ContainerDied","Data":"02dcbac63989bde09ea3a3d95941a7f8bcd2bc75d20be6873da308278a857bac"} Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.181965 4854 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="02dcbac63989bde09ea3a3d95941a7f8bcd2bc75d20be6873da308278a857bac" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.183691 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-dlf9r" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.315027 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"] Nov 25 10:23:59 crc kubenswrapper[4854]: E1125 10:23:59.316419 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.316448 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.316940 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a73a64c8-e9bd-44ce-a844-a7ab9b1c3047" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.319362 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.323201 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.323507 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.323695 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.325235 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.328414 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.332003 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"] Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.395659 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.395745 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.395807 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.395862 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.396023 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpp9x\" (UniqueName: \"kubernetes.io/projected/7f6c02cf-5044-461e-92d6-107c2e965a7a-kube-api-access-gpp9x\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.396104 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.396128 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.498453 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpp9x\" (UniqueName: \"kubernetes.io/projected/7f6c02cf-5044-461e-92d6-107c2e965a7a-kube-api-access-gpp9x\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.498541 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.498567 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.498728 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.498765 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.498804 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.503518 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.504075 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.504288 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.504409 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.505822 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.507117 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.519191 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpp9x\" (UniqueName: \"kubernetes.io/projected/7f6c02cf-5044-461e-92d6-107c2e965a7a-kube-api-access-gpp9x\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-drzgj\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:23:59 crc kubenswrapper[4854]: I1125 10:23:59.648317 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"
Nov 25 10:24:00 crc kubenswrapper[4854]: I1125 10:24:00.244796 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj"]
Nov 25 10:24:01 crc kubenswrapper[4854]: I1125 10:24:01.209606 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" event={"ID":"7f6c02cf-5044-461e-92d6-107c2e965a7a","Type":"ContainerStarted","Data":"4a6449963c9d64c7e517398078aa97afb2d9dfc5b6c5c7bbf5b065d86af52edf"}
Nov 25 10:24:02 crc kubenswrapper[4854]: I1125 10:24:02.223203 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" event={"ID":"7f6c02cf-5044-461e-92d6-107c2e965a7a","Type":"ContainerStarted","Data":"4c8ff89cae438cf3b1f2e884d0051fc1b654b9aed404c03c559cc19f8e9b510c"}
Nov 25 10:24:02 crc kubenswrapper[4854]: I1125 10:24:02.242436 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" podStartSLOduration=2.419384843 podStartE2EDuration="3.242410091s" podCreationTimestamp="2025-11-25 10:23:59 +0000 UTC" firstStartedPulling="2025-11-25 10:24:00.251479116 +0000 UTC m=+2846.104472492" lastFinishedPulling="2025-11-25 10:24:01.074504364 +0000 UTC m=+2846.927497740" observedRunningTime="2025-11-25 10:24:02.239309706 +0000 UTC m=+2848.092303092" watchObservedRunningTime="2025-11-25 10:24:02.242410091 +0000 UTC m=+2848.095403467"
Nov 25 10:24:55 crc kubenswrapper[4854]: I1125 10:24:55.035936 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:24:55 crc kubenswrapper[4854]: I1125 10:24:55.036780 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:25:25 crc kubenswrapper[4854]: I1125 10:25:25.028614 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:25:25 crc kubenswrapper[4854]: I1125 10:25:25.029079 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.029002 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.029565 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.029619 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.031094 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.031158 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" gracePeriod=600 Nov 25 10:25:55 crc kubenswrapper[4854]: E1125 10:25:55.156837 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.563285 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" exitCode=0 Nov 
25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.563330 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"} Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.563365 4854 scope.go:117] "RemoveContainer" containerID="d4dce61a59fd748b9095cb222d9a2995882c94f0f1c60c5b9043718eded2f02b" Nov 25 10:25:55 crc kubenswrapper[4854]: I1125 10:25:55.564768 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:25:55 crc kubenswrapper[4854]: E1125 10:25:55.566418 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:26:07 crc kubenswrapper[4854]: I1125 10:26:07.013636 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:26:07 crc kubenswrapper[4854]: E1125 10:26:07.014439 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:26:20 crc kubenswrapper[4854]: I1125 10:26:20.014594 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:26:20 crc kubenswrapper[4854]: E1125 10:26:20.015437 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:26:32 crc kubenswrapper[4854]: I1125 10:26:32.014448 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:26:32 crc kubenswrapper[4854]: E1125 10:26:32.015483 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:26:38 crc kubenswrapper[4854]: I1125 10:26:38.058110 4854 generic.go:334] "Generic (PLEG): container finished" podID="7f6c02cf-5044-461e-92d6-107c2e965a7a" containerID="4c8ff89cae438cf3b1f2e884d0051fc1b654b9aed404c03c559cc19f8e9b510c" exitCode=0 Nov 25 10:26:38 crc kubenswrapper[4854]: I1125 10:26:38.058201 4854 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" event={"ID":"7f6c02cf-5044-461e-92d6-107c2e965a7a","Type":"ContainerDied","Data":"4c8ff89cae438cf3b1f2e884d0051fc1b654b9aed404c03c559cc19f8e9b510c"} Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.592385 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.685174 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-2\") pod \"7f6c02cf-5044-461e-92d6-107c2e965a7a\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.685317 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-inventory\") pod \"7f6c02cf-5044-461e-92d6-107c2e965a7a\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.685391 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-0\") pod \"7f6c02cf-5044-461e-92d6-107c2e965a7a\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.685521 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-telemetry-combined-ca-bundle\") pod \"7f6c02cf-5044-461e-92d6-107c2e965a7a\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.685557 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpp9x\" (UniqueName: \"kubernetes.io/projected/7f6c02cf-5044-461e-92d6-107c2e965a7a-kube-api-access-gpp9x\") pod \"7f6c02cf-5044-461e-92d6-107c2e965a7a\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.685632 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ssh-key\") pod \"7f6c02cf-5044-461e-92d6-107c2e965a7a\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.685810 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-1\") pod \"7f6c02cf-5044-461e-92d6-107c2e965a7a\" (UID: \"7f6c02cf-5044-461e-92d6-107c2e965a7a\") " Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.701895 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f6c02cf-5044-461e-92d6-107c2e965a7a-kube-api-access-gpp9x" (OuterVolumeSpecName: "kube-api-access-gpp9x") pod "7f6c02cf-5044-461e-92d6-107c2e965a7a" (UID: "7f6c02cf-5044-461e-92d6-107c2e965a7a"). InnerVolumeSpecName "kube-api-access-gpp9x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.702482 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "7f6c02cf-5044-461e-92d6-107c2e965a7a" (UID: "7f6c02cf-5044-461e-92d6-107c2e965a7a"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.728296 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "7f6c02cf-5044-461e-92d6-107c2e965a7a" (UID: "7f6c02cf-5044-461e-92d6-107c2e965a7a"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.733195 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7f6c02cf-5044-461e-92d6-107c2e965a7a" (UID: "7f6c02cf-5044-461e-92d6-107c2e965a7a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.736999 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-inventory" (OuterVolumeSpecName: "inventory") pod "7f6c02cf-5044-461e-92d6-107c2e965a7a" (UID: "7f6c02cf-5044-461e-92d6-107c2e965a7a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.738926 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "7f6c02cf-5044-461e-92d6-107c2e965a7a" (UID: "7f6c02cf-5044-461e-92d6-107c2e965a7a"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.742499 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "7f6c02cf-5044-461e-92d6-107c2e965a7a" (UID: "7f6c02cf-5044-461e-92d6-107c2e965a7a"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.790340 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.790398 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.790416 4854 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.790430 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpp9x\" (UniqueName: \"kubernetes.io/projected/7f6c02cf-5044-461e-92d6-107c2e965a7a-kube-api-access-gpp9x\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.790442 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.790456 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:39 crc kubenswrapper[4854]: I1125 10:26:39.790469 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/7f6c02cf-5044-461e-92d6-107c2e965a7a-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.080116 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" event={"ID":"7f6c02cf-5044-461e-92d6-107c2e965a7a","Type":"ContainerDied","Data":"4a6449963c9d64c7e517398078aa97afb2d9dfc5b6c5c7bbf5b065d86af52edf"} Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.080157 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a6449963c9d64c7e517398078aa97afb2d9dfc5b6c5c7bbf5b065d86af52edf" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.080173 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-drzgj" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.160402 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"] Nov 25 10:26:40 crc kubenswrapper[4854]: E1125 10:26:40.161018 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f6c02cf-5044-461e-92d6-107c2e965a7a" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.161038 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f6c02cf-5044-461e-92d6-107c2e965a7a" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.161259 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f6c02cf-5044-461e-92d6-107c2e965a7a" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.162177 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.167844 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.168195 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.168339 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.169552 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.169873 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.175709 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"] Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.301317 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.301383 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.301455 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.301649 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.301927 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.302007 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g74sc\" (UniqueName: \"kubernetes.io/projected/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-kube-api-access-g74sc\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.302390 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.404447 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g74sc\" (UniqueName: \"kubernetes.io/projected/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-kube-api-access-g74sc\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.404942 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.405000 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: 
\"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.405059 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.405182 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.405233 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.405473 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.411453 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.411666 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.412236 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:26:40 crc 
Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.412447 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"
Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.414715 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"
Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.415406 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"
Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.435945 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g74sc\" (UniqueName: \"kubernetes.io/projected/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-kube-api-access-g74sc\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"
Nov 25 10:26:40 crc kubenswrapper[4854]: I1125 10:26:40.487199 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"
Nov 25 10:26:41 crc kubenswrapper[4854]: I1125 10:26:41.054863 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc"]
Nov 25 10:26:41 crc kubenswrapper[4854]: I1125 10:26:41.060315 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 10:26:41 crc kubenswrapper[4854]: I1125 10:26:41.091760 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" event={"ID":"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044","Type":"ContainerStarted","Data":"2434c9fcc940c3c0a1867016b3ffcc9983975a4409fa1ac9e3ccd38cd8de9642"}
Nov 25 10:26:44 crc kubenswrapper[4854]: I1125 10:26:44.013753 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:26:44 crc kubenswrapper[4854]: E1125 10:26:44.014684 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:26:44 crc kubenswrapper[4854]: I1125 10:26:44.124360 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" event={"ID":"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044","Type":"ContainerStarted","Data":"e0c906ed27f36e4f6bc4e3a5dbc99a37cc7e83ff76f744c12416d3ff5d25f378"}
Nov 25 10:26:44 crc kubenswrapper[4854]: I1125 10:26:44.143022 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" podStartSLOduration=2.48239936 podStartE2EDuration="4.143006289s" podCreationTimestamp="2025-11-25 10:26:40 +0000 UTC" firstStartedPulling="2025-11-25 10:26:41.060056401 +0000 UTC m=+3006.913049787" lastFinishedPulling="2025-11-25 10:26:42.72066332 +0000 UTC m=+3008.573656716" observedRunningTime="2025-11-25 10:26:44.141516199 +0000 UTC m=+3009.994509585" watchObservedRunningTime="2025-11-25 10:26:44.143006289 +0000 UTC m=+3009.995999665"
Nov 25 10:26:56 crc kubenswrapper[4854]: I1125 10:26:56.013570 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:26:56 crc kubenswrapper[4854]: E1125 10:26:56.014449 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:27:09 crc kubenswrapper[4854]: I1125 10:27:09.013573 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:27:09 crc kubenswrapper[4854]: E1125 10:27:09.014388 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:27:21 crc kubenswrapper[4854]: I1125 10:27:21.014418 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:27:21 crc kubenswrapper[4854]: E1125 10:27:21.015238 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:27:32 crc kubenswrapper[4854]: I1125 10:27:32.013685 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:27:32 crc kubenswrapper[4854]: E1125 10:27:32.014859 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:27:45 crc kubenswrapper[4854]: I1125 10:27:45.021959 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:27:45 crc kubenswrapper[4854]: E1125 10:27:45.022576 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:27:56 crc kubenswrapper[4854]: I1125 10:27:56.013632 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:27:56 crc kubenswrapper[4854]: E1125 10:27:56.014560 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.058737 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jl79w"]
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.061750 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.100499 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl79w"]
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.164997 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-catalog-content\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.165572 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7dbz\" (UniqueName: \"kubernetes.io/projected/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-kube-api-access-r7dbz\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.165832 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-utilities\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.267688 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-catalog-content\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.267883 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7dbz\" (UniqueName: \"kubernetes.io/projected/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-kube-api-access-r7dbz\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.267978 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-utilities\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.268487 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-utilities\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.268659 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-catalog-content\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w"
Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.296979 4854 operation_generator.go:637] "MountVolume.SetUp
succeeded for volume \"kube-api-access-r7dbz\" (UniqueName: \"kubernetes.io/projected/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-kube-api-access-r7dbz\") pod \"redhat-marketplace-jl79w\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.387470 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:05 crc kubenswrapper[4854]: I1125 10:28:05.946890 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl79w"] Nov 25 10:28:06 crc kubenswrapper[4854]: I1125 10:28:06.061274 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl79w" event={"ID":"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9","Type":"ContainerStarted","Data":"41945731239ba291ab836fb506d978df458f92d3bdf559cf99d62aec0e870e19"} Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.074696 4854 generic.go:334] "Generic (PLEG): container finished" podID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerID="c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1" exitCode=0 Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.074772 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl79w" event={"ID":"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9","Type":"ContainerDied","Data":"c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1"} Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.441119 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nvglz"] Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.444412 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.479759 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nvglz"] Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.535899 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r6cq\" (UniqueName: \"kubernetes.io/projected/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-kube-api-access-5r6cq\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.536458 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-utilities\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.536753 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-catalog-content\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.647309 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-utilities\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.647529 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-catalog-content\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.647751 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r6cq\" (UniqueName: \"kubernetes.io/projected/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-kube-api-access-5r6cq\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.648488 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-utilities\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.648792 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-catalog-content\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.693046 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5r6cq\" (UniqueName: \"kubernetes.io/projected/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-kube-api-access-5r6cq\") pod \"certified-operators-nvglz\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:07 crc kubenswrapper[4854]: I1125 10:28:07.771622 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:08 crc kubenswrapper[4854]: I1125 10:28:08.014530 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:28:08 crc kubenswrapper[4854]: E1125 10:28:08.015338 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:28:08 crc kubenswrapper[4854]: I1125 10:28:08.424197 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nvglz"] Nov 25 10:28:09 crc kubenswrapper[4854]: I1125 10:28:09.141592 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl79w" event={"ID":"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9","Type":"ContainerStarted","Data":"4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0"} Nov 25 10:28:09 crc kubenswrapper[4854]: I1125 10:28:09.143982 4854 generic.go:334] "Generic (PLEG): container finished" podID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerID="574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9" exitCode=0 Nov 25 10:28:09 crc kubenswrapper[4854]: I1125 10:28:09.144020 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvglz" event={"ID":"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8","Type":"ContainerDied","Data":"574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9"} Nov 25 10:28:09 crc kubenswrapper[4854]: I1125 10:28:09.144042 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvglz" event={"ID":"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8","Type":"ContainerStarted","Data":"be53f462ce0c1e1845bf0f97e2156bd7a5397ca2a2a9571fa2fe6e419ab3dd96"} Nov 25 10:28:10 crc kubenswrapper[4854]: I1125 10:28:10.158600 4854 generic.go:334] "Generic (PLEG): container finished" podID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerID="4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0" exitCode=0 Nov 25 10:28:10 crc kubenswrapper[4854]: I1125 10:28:10.158744 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl79w" event={"ID":"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9","Type":"ContainerDied","Data":"4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0"} Nov 25 10:28:11 crc kubenswrapper[4854]: I1125 10:28:11.195085 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl79w" event={"ID":"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9","Type":"ContainerStarted","Data":"442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8"} Nov 25 10:28:11 crc kubenswrapper[4854]: I1125 10:28:11.204286 4854 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvglz" event={"ID":"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8","Type":"ContainerStarted","Data":"30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724"} Nov 25 10:28:11 crc kubenswrapper[4854]: I1125 10:28:11.220546 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jl79w" podStartSLOduration=2.7331601169999997 podStartE2EDuration="6.220509564s" podCreationTimestamp="2025-11-25 10:28:05 +0000 UTC" firstStartedPulling="2025-11-25 10:28:07.078791727 +0000 UTC m=+3092.931785103" lastFinishedPulling="2025-11-25 10:28:10.566141174 +0000 UTC m=+3096.419134550" observedRunningTime="2025-11-25 10:28:11.219608919 +0000 UTC m=+3097.072602485" watchObservedRunningTime="2025-11-25 10:28:11.220509564 +0000 UTC m=+3097.073502940" Nov 25 10:28:13 crc kubenswrapper[4854]: I1125 10:28:13.232599 4854 generic.go:334] "Generic (PLEG): container finished" podID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerID="30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724" exitCode=0 Nov 25 10:28:13 crc kubenswrapper[4854]: I1125 10:28:13.232687 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvglz" event={"ID":"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8","Type":"ContainerDied","Data":"30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724"} Nov 25 10:28:14 crc kubenswrapper[4854]: I1125 10:28:14.253196 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvglz" event={"ID":"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8","Type":"ContainerStarted","Data":"07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b"} Nov 25 10:28:14 crc kubenswrapper[4854]: I1125 10:28:14.295554 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nvglz" podStartSLOduration=2.799444091 podStartE2EDuration="7.295531416s" podCreationTimestamp="2025-11-25 10:28:07 +0000 UTC" firstStartedPulling="2025-11-25 10:28:09.14653327 +0000 UTC m=+3094.999526636" lastFinishedPulling="2025-11-25 10:28:13.642620585 +0000 UTC m=+3099.495613961" observedRunningTime="2025-11-25 10:28:14.294305402 +0000 UTC m=+3100.147298778" watchObservedRunningTime="2025-11-25 10:28:14.295531416 +0000 UTC m=+3100.148524792" Nov 25 10:28:15 crc kubenswrapper[4854]: I1125 10:28:15.387955 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:15 crc kubenswrapper[4854]: I1125 10:28:15.388218 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:15 crc kubenswrapper[4854]: I1125 10:28:15.445405 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:16 crc kubenswrapper[4854]: I1125 10:28:16.332346 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:17 crc kubenswrapper[4854]: I1125 10:28:17.036099 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl79w"] Nov 25 10:28:17 crc kubenswrapper[4854]: I1125 10:28:17.772313 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:17 crc 
kubenswrapper[4854]: I1125 10:28:17.772905 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:17 crc kubenswrapper[4854]: I1125 10:28:17.832954 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.297470 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jl79w" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="registry-server" containerID="cri-o://442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8" gracePeriod=2 Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.360517 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.828690 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.869264 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7dbz\" (UniqueName: \"kubernetes.io/projected/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-kube-api-access-r7dbz\") pod \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.869342 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-catalog-content\") pod \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.869383 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-utilities\") pod \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\" (UID: \"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9\") " Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.870419 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-utilities" (OuterVolumeSpecName: "utilities") pod "b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" (UID: "b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.886279 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-kube-api-access-r7dbz" (OuterVolumeSpecName: "kube-api-access-r7dbz") pod "b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" (UID: "b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9"). InnerVolumeSpecName "kube-api-access-r7dbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.893927 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" (UID: "b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.972369 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7dbz\" (UniqueName: \"kubernetes.io/projected/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-kube-api-access-r7dbz\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.972610 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:18 crc kubenswrapper[4854]: I1125 10:28:18.972621 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.015767 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:28:19 crc kubenswrapper[4854]: E1125 10:28:19.016141 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.310135 4854 generic.go:334] "Generic (PLEG): container finished" podID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerID="442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8" exitCode=0 Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.310190 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl79w" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.310222 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl79w" event={"ID":"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9","Type":"ContainerDied","Data":"442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8"} Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.310285 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl79w" event={"ID":"b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9","Type":"ContainerDied","Data":"41945731239ba291ab836fb506d978df458f92d3bdf559cf99d62aec0e870e19"} Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.310312 4854 scope.go:117] "RemoveContainer" containerID="442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.344059 4854 scope.go:117] "RemoveContainer" containerID="4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.351963 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl79w"] Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.363767 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl79w"] Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.384380 4854 scope.go:117] "RemoveContainer" containerID="c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.452747 4854 scope.go:117] "RemoveContainer" containerID="442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8" Nov 25 10:28:19 crc kubenswrapper[4854]: E1125 10:28:19.453379 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8\": container with ID starting with 442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8 not found: ID does not exist" containerID="442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.453412 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8"} err="failed to get container status \"442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8\": rpc error: code = NotFound desc = could not find container \"442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8\": container with ID starting with 442dc771cf8fbf2bf51ca82897367ffa1a6d638dc9f803d0a0694c1f10102cb8 not found: ID does not exist" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.453438 4854 scope.go:117] "RemoveContainer" containerID="4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0" Nov 25 10:28:19 crc kubenswrapper[4854]: E1125 10:28:19.453890 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0\": container with ID starting with 4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0 not found: ID does not exist" containerID="4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.453959 4854 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0"} err="failed to get container status \"4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0\": rpc error: code = NotFound desc = could not find container \"4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0\": container with ID starting with 4983e1fbf20dde42e95c4cc5a2520b3226696bf23f80740612a496e98041e4f0 not found: ID does not exist" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.454010 4854 scope.go:117] "RemoveContainer" containerID="c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1" Nov 25 10:28:19 crc kubenswrapper[4854]: E1125 10:28:19.456303 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1\": container with ID starting with c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1 not found: ID does not exist" containerID="c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1" Nov 25 10:28:19 crc kubenswrapper[4854]: I1125 10:28:19.456332 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1"} err="failed to get container status \"c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1\": rpc error: code = NotFound desc = could not find container \"c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1\": container with ID starting with c64d302902441fda356e7886f1b53f21ba45cd34447d31eb8045330b06e83ef1 not found: ID does not exist" Nov 25 10:28:20 crc kubenswrapper[4854]: I1125 10:28:20.231333 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nvglz"] Nov 25 10:28:21 crc kubenswrapper[4854]: I1125 10:28:21.030294 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" path="/var/lib/kubelet/pods/b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9/volumes" Nov 25 10:28:21 crc kubenswrapper[4854]: I1125 10:28:21.334494 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nvglz" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerName="registry-server" containerID="cri-o://07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b" gracePeriod=2 Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.042660 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.162018 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-utilities\") pod \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.162421 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r6cq\" (UniqueName: \"kubernetes.io/projected/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-kube-api-access-5r6cq\") pod \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.162609 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-catalog-content\") pod \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\" (UID: \"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8\") " Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.163164 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-utilities" (OuterVolumeSpecName: "utilities") pod "dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" (UID: "dab4003e-2d83-4c6a-8c8d-529ea93cc8e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.163683 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.167590 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-kube-api-access-5r6cq" (OuterVolumeSpecName: "kube-api-access-5r6cq") pod "dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" (UID: "dab4003e-2d83-4c6a-8c8d-529ea93cc8e8"). InnerVolumeSpecName "kube-api-access-5r6cq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.217824 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" (UID: "dab4003e-2d83-4c6a-8c8d-529ea93cc8e8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.265860 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r6cq\" (UniqueName: \"kubernetes.io/projected/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-kube-api-access-5r6cq\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.265903 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.349099 4854 generic.go:334] "Generic (PLEG): container finished" podID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerID="07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b" exitCode=0 Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.349181 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvglz" event={"ID":"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8","Type":"ContainerDied","Data":"07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b"} Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.349347 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nvglz" event={"ID":"dab4003e-2d83-4c6a-8c8d-529ea93cc8e8","Type":"ContainerDied","Data":"be53f462ce0c1e1845bf0f97e2156bd7a5397ca2a2a9571fa2fe6e419ab3dd96"} Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.349367 4854 scope.go:117] "RemoveContainer" containerID="07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.349209 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nvglz" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.384030 4854 scope.go:117] "RemoveContainer" containerID="30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.400283 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nvglz"] Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.418262 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nvglz"] Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.429041 4854 scope.go:117] "RemoveContainer" containerID="574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.482806 4854 scope.go:117] "RemoveContainer" containerID="07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b" Nov 25 10:28:22 crc kubenswrapper[4854]: E1125 10:28:22.483419 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b\": container with ID starting with 07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b not found: ID does not exist" containerID="07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.483489 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b"} err="failed to get container status \"07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b\": rpc error: code = NotFound desc = could not find container \"07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b\": container with ID starting with 07b9ffd0ab308ace837ea5a7609159241a97572df1fea04a958d2d809b1e421b not found: ID does not exist" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.483517 4854 scope.go:117] "RemoveContainer" containerID="30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724" Nov 25 10:28:22 crc kubenswrapper[4854]: E1125 10:28:22.484043 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724\": container with ID starting with 30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724 not found: ID does not exist" containerID="30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.484076 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724"} err="failed to get container status \"30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724\": rpc error: code = NotFound desc = could not find container \"30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724\": container with ID starting with 30b53d3c54a5d2a1dc009b7ddf9577400a08e0a17a617605fb73c15012d26724 not found: ID does not exist" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.484097 4854 scope.go:117] "RemoveContainer" containerID="574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9" Nov 25 10:28:22 crc kubenswrapper[4854]: E1125 10:28:22.484587 4854 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9\": container with ID starting with 574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9 not found: ID does not exist" containerID="574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9" Nov 25 10:28:22 crc kubenswrapper[4854]: I1125 10:28:22.484623 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9"} err="failed to get container status \"574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9\": rpc error: code = NotFound desc = could not find container \"574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9\": container with ID starting with 574a470cdba5ac83f0b57ed3a9cde76d2f9c6393d26e719d904c339a9ec44bf9 not found: ID does not exist" Nov 25 10:28:23 crc kubenswrapper[4854]: I1125 10:28:23.032243 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" path="/var/lib/kubelet/pods/dab4003e-2d83-4c6a-8c8d-529ea93cc8e8/volumes" Nov 25 10:28:31 crc kubenswrapper[4854]: I1125 10:28:31.014097 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:28:31 crc kubenswrapper[4854]: E1125 10:28:31.014961 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:28:35 crc kubenswrapper[4854]: I1125 10:28:35.518623 4854 generic.go:334] "Generic (PLEG): container finished" podID="a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" containerID="e0c906ed27f36e4f6bc4e3a5dbc99a37cc7e83ff76f744c12416d3ff5d25f378" exitCode=0 Nov 25 10:28:35 crc kubenswrapper[4854]: I1125 10:28:35.518734 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" event={"ID":"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044","Type":"ContainerDied","Data":"e0c906ed27f36e4f6bc4e3a5dbc99a37cc7e83ff76f744c12416d3ff5d25f378"} Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.139496 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.266931 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g74sc\" (UniqueName: \"kubernetes.io/projected/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-kube-api-access-g74sc\") pod \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.267039 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-telemetry-power-monitoring-combined-ca-bundle\") pod \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.267146 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-2\") pod \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.267204 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-1\") pod \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.267231 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ssh-key\") pod \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.267471 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-0\") pod \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.267568 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-inventory\") pod \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\" (UID: \"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044\") " Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.273307 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" (UID: "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.274013 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-kube-api-access-g74sc" (OuterVolumeSpecName: "kube-api-access-g74sc") pod "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" (UID: "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044"). InnerVolumeSpecName "kube-api-access-g74sc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.303436 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" (UID: "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.305624 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" (UID: "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.306632 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" (UID: "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.308707 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" (UID: "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.309226 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-inventory" (OuterVolumeSpecName: "inventory") pod "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" (UID: "a09aa3eb-06ca-4f41-9d00-0b13b8bd8044"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.371250 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.371284 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g74sc\" (UniqueName: \"kubernetes.io/projected/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-kube-api-access-g74sc\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.371303 4854 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.371313 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.371325 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.371334 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.371343 4854 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/a09aa3eb-06ca-4f41-9d00-0b13b8bd8044-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.542418 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" event={"ID":"a09aa3eb-06ca-4f41-9d00-0b13b8bd8044","Type":"ContainerDied","Data":"2434c9fcc940c3c0a1867016b3ffcc9983975a4409fa1ac9e3ccd38cd8de9642"} Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.542468 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2434c9fcc940c3c0a1867016b3ffcc9983975a4409fa1ac9e3ccd38cd8de9642" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.542513 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.663839 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg"] Nov 25 10:28:37 crc kubenswrapper[4854]: E1125 10:28:37.664541 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerName="extract-content" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664566 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerName="extract-content" Nov 25 10:28:37 crc kubenswrapper[4854]: E1125 10:28:37.664575 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="registry-server" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664581 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="registry-server" Nov 25 10:28:37 crc kubenswrapper[4854]: E1125 10:28:37.664599 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="extract-utilities" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664605 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="extract-utilities" Nov 25 10:28:37 crc kubenswrapper[4854]: E1125 10:28:37.664618 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerName="extract-utilities" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664625 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerName="extract-utilities" Nov 25 10:28:37 crc kubenswrapper[4854]: E1125 10:28:37.664638 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="extract-content" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664644 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="extract-content" Nov 25 10:28:37 crc kubenswrapper[4854]: E1125 10:28:37.664699 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerName="registry-server" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664707 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" containerName="registry-server" Nov 25 10:28:37 crc kubenswrapper[4854]: E1125 10:28:37.664720 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664727 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664969 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b175c0d5-83c3-4a9c-a8bc-cae0ed092ca9" containerName="registry-server" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.664999 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="dab4003e-2d83-4c6a-8c8d-529ea93cc8e8" 
containerName="registry-server" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.665027 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a09aa3eb-06ca-4f41-9d00-0b13b8bd8044" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.665963 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.672659 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.672856 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.672876 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.672927 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-6xbdw" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.673631 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.674747 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg"] Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.781741 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.781953 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.782010 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfz8z\" (UniqueName: \"kubernetes.io/projected/24119236-3cfd-4959-b117-36042fab9fb3-kube-api-access-zfz8z\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.782408 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.782474 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.885024 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.885392 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.885432 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.885521 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.885558 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfz8z\" (UniqueName: \"kubernetes.io/projected/24119236-3cfd-4959-b117-36042fab9fb3-kube-api-access-zfz8z\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.890079 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.890890 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.893779 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: 
\"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.894889 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.904855 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfz8z\" (UniqueName: \"kubernetes.io/projected/24119236-3cfd-4959-b117-36042fab9fb3-kube-api-access-zfz8z\") pod \"logging-edpm-deployment-openstack-edpm-ipam-5hbrg\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:37 crc kubenswrapper[4854]: I1125 10:28:37.984932 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" Nov 25 10:28:38 crc kubenswrapper[4854]: I1125 10:28:38.529395 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg"] Nov 25 10:28:38 crc kubenswrapper[4854]: I1125 10:28:38.555870 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" event={"ID":"24119236-3cfd-4959-b117-36042fab9fb3","Type":"ContainerStarted","Data":"ad06265d92016f04e2c86ccd686a1bfbfdf3351fd06eb6d26c0333f07b901b1c"} Nov 25 10:28:39 crc kubenswrapper[4854]: I1125 10:28:39.567174 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" event={"ID":"24119236-3cfd-4959-b117-36042fab9fb3","Type":"ContainerStarted","Data":"7198f2f7a26aaf6439409c3c7bab27ccf70ca1525f986670f1e891afd9ceecbc"} Nov 25 10:28:39 crc kubenswrapper[4854]: I1125 10:28:39.610548 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" podStartSLOduration=1.9892175170000002 podStartE2EDuration="2.610526392s" podCreationTimestamp="2025-11-25 10:28:37 +0000 UTC" firstStartedPulling="2025-11-25 10:28:38.534557874 +0000 UTC m=+3124.387551250" lastFinishedPulling="2025-11-25 10:28:39.155866749 +0000 UTC m=+3125.008860125" observedRunningTime="2025-11-25 10:28:39.605047992 +0000 UTC m=+3125.458041378" watchObservedRunningTime="2025-11-25 10:28:39.610526392 +0000 UTC m=+3125.463519768" Nov 25 10:28:43 crc kubenswrapper[4854]: I1125 10:28:43.013998 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:28:43 crc kubenswrapper[4854]: E1125 10:28:43.014845 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:28:54 crc kubenswrapper[4854]: I1125 
Nov 25 10:28:54 crc kubenswrapper[4854]: I1125 10:28:54.764513 4854 generic.go:334] "Generic (PLEG): container finished" podID="24119236-3cfd-4959-b117-36042fab9fb3" containerID="7198f2f7a26aaf6439409c3c7bab27ccf70ca1525f986670f1e891afd9ceecbc" exitCode=0
Nov 25 10:28:54 crc kubenswrapper[4854]: I1125 10:28:54.764588 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" event={"ID":"24119236-3cfd-4959-b117-36042fab9fb3","Type":"ContainerDied","Data":"7198f2f7a26aaf6439409c3c7bab27ccf70ca1525f986670f1e891afd9ceecbc"}
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.255732 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg"
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.444541 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-ssh-key\") pod \"24119236-3cfd-4959-b117-36042fab9fb3\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") "
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.445026 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-inventory\") pod \"24119236-3cfd-4959-b117-36042fab9fb3\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") "
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.445078 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-1\") pod \"24119236-3cfd-4959-b117-36042fab9fb3\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") "
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.445152 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfz8z\" (UniqueName: \"kubernetes.io/projected/24119236-3cfd-4959-b117-36042fab9fb3-kube-api-access-zfz8z\") pod \"24119236-3cfd-4959-b117-36042fab9fb3\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") "
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.445180 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-0\") pod \"24119236-3cfd-4959-b117-36042fab9fb3\" (UID: \"24119236-3cfd-4959-b117-36042fab9fb3\") "
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.450666 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24119236-3cfd-4959-b117-36042fab9fb3-kube-api-access-zfz8z" (OuterVolumeSpecName: "kube-api-access-zfz8z") pod "24119236-3cfd-4959-b117-36042fab9fb3" (UID: "24119236-3cfd-4959-b117-36042fab9fb3"). InnerVolumeSpecName "kube-api-access-zfz8z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.479291 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "24119236-3cfd-4959-b117-36042fab9fb3" (UID: "24119236-3cfd-4959-b117-36042fab9fb3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.479586 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "24119236-3cfd-4959-b117-36042fab9fb3" (UID: "24119236-3cfd-4959-b117-36042fab9fb3"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.481008 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-inventory" (OuterVolumeSpecName: "inventory") pod "24119236-3cfd-4959-b117-36042fab9fb3" (UID: "24119236-3cfd-4959-b117-36042fab9fb3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.497547 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "24119236-3cfd-4959-b117-36042fab9fb3" (UID: "24119236-3cfd-4959-b117-36042fab9fb3"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.548228 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfz8z\" (UniqueName: \"kubernetes.io/projected/24119236-3cfd-4959-b117-36042fab9fb3-kube-api-access-zfz8z\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.548528 4854 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.548615 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.548722 4854 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.548811 4854 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/24119236-3cfd-4959-b117-36042fab9fb3-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.828927 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg" event={"ID":"24119236-3cfd-4959-b117-36042fab9fb3","Type":"ContainerDied","Data":"ad06265d92016f04e2c86ccd686a1bfbfdf3351fd06eb6d26c0333f07b901b1c"}
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.828969 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad06265d92016f04e2c86ccd686a1bfbfdf3351fd06eb6d26c0333f07b901b1c"
Nov 25 10:28:56 crc kubenswrapper[4854]: I1125 10:28:56.829050 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-5hbrg"
Nov 25 10:28:57 crc kubenswrapper[4854]: I1125 10:28:57.013744 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:28:57 crc kubenswrapper[4854]: E1125 10:28:57.014159 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:29:12 crc kubenswrapper[4854]: I1125 10:29:12.013391 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:29:12 crc kubenswrapper[4854]: E1125 10:29:12.014308 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:29:26 crc kubenswrapper[4854]: I1125 10:29:26.014126 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:29:26 crc kubenswrapper[4854]: E1125 10:29:26.015361 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:29:37 crc kubenswrapper[4854]: I1125 10:29:37.014245 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:29:37 crc kubenswrapper[4854]: E1125 10:29:37.015372 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:29:51 crc kubenswrapper[4854]: I1125 10:29:51.013162 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:29:51 crc kubenswrapper[4854]: E1125 10:29:51.013917 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
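[editor's note] The RemoveContainer / "Error syncing pod" pairs above recur every 10-15 seconds because the pod worker keeps retrying the sync, while the actual restart is gated by CrashLoopBackOff, which doubles the delay per crash up to the quoted 5m0s cap (the daemon finally restarts at 10:30:56 below). A sketch of that schedule, assuming the usual kubelet defaults of a 10s base and 5m cap:

# Sketch of the kubelet's crash-loop backoff schedule: delay starts at
# 10s and doubles per restart, capped at the "back-off 5m0s" quoted
# above. Base and cap are assumed defaults, in seconds.
def backoff_schedule(restarts, base=10.0, cap=300.0):
    delay = base
    for _ in range(restarts):
        yield min(delay, cap)
        delay *= 2

print(list(backoff_schedule(7)))  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0]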
source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq"] Nov 25 10:30:00 crc kubenswrapper[4854]: E1125 10:30:00.159383 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24119236-3cfd-4959-b117-36042fab9fb3" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.159404 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="24119236-3cfd-4959-b117-36042fab9fb3" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.159823 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="24119236-3cfd-4959-b117-36042fab9fb3" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.160927 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.164459 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.173540 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.180281 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq"] Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.256501 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-config-volume\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.257074 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-secret-volume\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.257313 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qjqv\" (UniqueName: \"kubernetes.io/projected/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-kube-api-access-2qjqv\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.359893 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-config-volume\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.360177 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-secret-volume\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.360335 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qjqv\" (UniqueName: \"kubernetes.io/projected/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-kube-api-access-2qjqv\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.361104 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-config-volume\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.367305 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-secret-volume\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.376636 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qjqv\" (UniqueName: \"kubernetes.io/projected/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-kube-api-access-2qjqv\") pod \"collect-profiles-29401110-s5krq\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.494019 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" Nov 25 10:30:00 crc kubenswrapper[4854]: I1125 10:30:00.958555 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq"] Nov 25 10:30:01 crc kubenswrapper[4854]: I1125 10:30:01.560268 4854 generic.go:334] "Generic (PLEG): container finished" podID="a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" containerID="a38455385db4471897b8726eb92b373fd7a92d267ac57404943c1637475aa0d6" exitCode=0 Nov 25 10:30:01 crc kubenswrapper[4854]: I1125 10:30:01.560316 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" event={"ID":"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45","Type":"ContainerDied","Data":"a38455385db4471897b8726eb92b373fd7a92d267ac57404943c1637475aa0d6"} Nov 25 10:30:01 crc kubenswrapper[4854]: I1125 10:30:01.560564 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" event={"ID":"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45","Type":"ContainerStarted","Data":"9826987ccc3df4d90533e0550fa2e50b38506ac6ba3b32826c7a35bd793acb9e"} Nov 25 10:30:02 crc kubenswrapper[4854]: I1125 10:30:02.984572 4854 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 10:30:02 crc kubenswrapper[4854]: I1125 10:30:02.984572 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq"
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.020574 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:30:03 crc kubenswrapper[4854]: E1125 10:30:03.020942 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.137509 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-config-volume\") pod \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") "
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.137948 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-secret-volume\") pod \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") "
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.138017 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qjqv\" (UniqueName: \"kubernetes.io/projected/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-kube-api-access-2qjqv\") pod \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\" (UID: \"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45\") "
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.141329 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-config-volume" (OuterVolumeSpecName: "config-volume") pod "a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" (UID: "a5818cf0-e565-45d7-a13b-8a6e5a9a7a45"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.142476 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.147579 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-kube-api-access-2qjqv" (OuterVolumeSpecName: "kube-api-access-2qjqv") pod "a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" (UID: "a5818cf0-e565-45d7-a13b-8a6e5a9a7a45"). InnerVolumeSpecName "kube-api-access-2qjqv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.161888 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" (UID: "a5818cf0-e565-45d7-a13b-8a6e5a9a7a45"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.245271 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.245316 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qjqv\" (UniqueName: \"kubernetes.io/projected/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45-kube-api-access-2qjqv\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.594471 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq" event={"ID":"a5818cf0-e565-45d7-a13b-8a6e5a9a7a45","Type":"ContainerDied","Data":"9826987ccc3df4d90533e0550fa2e50b38506ac6ba3b32826c7a35bd793acb9e"}
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.594548 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9826987ccc3df4d90533e0550fa2e50b38506ac6ba3b32826c7a35bd793acb9e"
Nov 25 10:30:03 crc kubenswrapper[4854]: I1125 10:30:03.595040 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq"
Nov 25 10:30:04 crc kubenswrapper[4854]: I1125 10:30:04.075098 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z"]
Nov 25 10:30:04 crc kubenswrapper[4854]: I1125 10:30:04.089149 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-8mh5z"]
Nov 25 10:30:05 crc kubenswrapper[4854]: I1125 10:30:05.038427 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d8869a0-3da4-4d51-8027-f63d7999f409" path="/var/lib/kubelet/pods/5d8869a0-3da4-4d51-8027-f63d7999f409/volumes"
Nov 25 10:30:15 crc kubenswrapper[4854]: I1125 10:30:15.030362 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:30:15 crc kubenswrapper[4854]: E1125 10:30:15.031538 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.604181 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pqw9x"]
Nov 25 10:30:20 crc kubenswrapper[4854]: E1125 10:30:20.605118 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" containerName="collect-profiles"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.605131 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" containerName="collect-profiles"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.605403 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" containerName="collect-profiles"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.607216 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.621980 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqw9x"]
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.740837 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27bph\" (UniqueName: \"kubernetes.io/projected/8c436160-4490-4509-9eb3-1cf22c505e0a-kube-api-access-27bph\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.740970 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c436160-4490-4509-9eb3-1cf22c505e0a-utilities\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.741280 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c436160-4490-4509-9eb3-1cf22c505e0a-catalog-content\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.843312 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c436160-4490-4509-9eb3-1cf22c505e0a-catalog-content\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.843530 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27bph\" (UniqueName: \"kubernetes.io/projected/8c436160-4490-4509-9eb3-1cf22c505e0a-kube-api-access-27bph\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.843585 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c436160-4490-4509-9eb3-1cf22c505e0a-utilities\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.843973 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c436160-4490-4509-9eb3-1cf22c505e0a-catalog-content\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.844148 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c436160-4490-4509-9eb3-1cf22c505e0a-utilities\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.868260 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27bph\" (UniqueName: \"kubernetes.io/projected/8c436160-4490-4509-9eb3-1cf22c505e0a-kube-api-access-27bph\") pod \"redhat-operators-pqw9x\" (UID: \"8c436160-4490-4509-9eb3-1cf22c505e0a\") " pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:20 crc kubenswrapper[4854]: I1125 10:30:20.938836 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:21 crc kubenswrapper[4854]: I1125 10:30:21.497632 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqw9x"]
Nov 25 10:30:21 crc kubenswrapper[4854]: I1125 10:30:21.819621 4854 generic.go:334] "Generic (PLEG): container finished" podID="8c436160-4490-4509-9eb3-1cf22c505e0a" containerID="3d2382ad0443572c4eda01ef0c5e251e94466c9d924ece2c8ce5b0c2b58ca9b6" exitCode=0
Nov 25 10:30:21 crc kubenswrapper[4854]: I1125 10:30:21.819940 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw9x" event={"ID":"8c436160-4490-4509-9eb3-1cf22c505e0a","Type":"ContainerDied","Data":"3d2382ad0443572c4eda01ef0c5e251e94466c9d924ece2c8ce5b0c2b58ca9b6"}
Nov 25 10:30:21 crc kubenswrapper[4854]: I1125 10:30:21.819974 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw9x" event={"ID":"8c436160-4490-4509-9eb3-1cf22c505e0a","Type":"ContainerStarted","Data":"5419c3e653a71ae43161fc5897785738ba8908c7d97580f3d5eafd59862f3d3d"}
Nov 25 10:30:22 crc kubenswrapper[4854]: E1125 10:30:22.015507 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8c436160_4490_4509_9eb3_1cf22c505e0a.slice/crio-3d2382ad0443572c4eda01ef0c5e251e94466c9d924ece2c8ce5b0c2b58ca9b6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8c436160_4490_4509_9eb3_1cf22c505e0a.slice/crio-conmon-3d2382ad0443572c4eda01ef0c5e251e94466c9d924ece2c8ce5b0c2b58ca9b6.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 10:30:28 crc kubenswrapper[4854]: I1125 10:30:28.013958 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:30:28 crc kubenswrapper[4854]: E1125 10:30:28.014893 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:30:30 crc kubenswrapper[4854]: I1125 10:30:30.935571 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw9x" event={"ID":"8c436160-4490-4509-9eb3-1cf22c505e0a","Type":"ContainerStarted","Data":"b44ddc3812e40fa6c891fb9a685bcd0a2ce80f1ad0d40982589e63a5a74f1295"}
Nov 25 10:30:31 crc kubenswrapper[4854]: I1125 10:30:31.951452 4854 generic.go:334] "Generic (PLEG): container finished" podID="8c436160-4490-4509-9eb3-1cf22c505e0a" containerID="b44ddc3812e40fa6c891fb9a685bcd0a2ce80f1ad0d40982589e63a5a74f1295" exitCode=0
Nov 25 10:30:31 crc kubenswrapper[4854]: I1125 10:30:31.951528 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw9x" event={"ID":"8c436160-4490-4509-9eb3-1cf22c505e0a","Type":"ContainerDied","Data":"b44ddc3812e40fa6c891fb9a685bcd0a2ce80f1ad0d40982589e63a5a74f1295"}
Nov 25 10:30:32 crc kubenswrapper[4854]: I1125 10:30:32.964827 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw9x" event={"ID":"8c436160-4490-4509-9eb3-1cf22c505e0a","Type":"ContainerStarted","Data":"46ce7bda2d5d10c172f5529e33847ed1ed4dcbf5824844bd2e35773ee7a651c2"}
Nov 25 10:30:32 crc kubenswrapper[4854]: I1125 10:30:32.994503 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pqw9x" podStartSLOduration=2.213712357 podStartE2EDuration="12.994485472s" podCreationTimestamp="2025-11-25 10:30:20 +0000 UTC" firstStartedPulling="2025-11-25 10:30:21.822516971 +0000 UTC m=+3227.675510347" lastFinishedPulling="2025-11-25 10:30:32.603290076 +0000 UTC m=+3238.456283462" observedRunningTime="2025-11-25 10:30:32.981172778 +0000 UTC m=+3238.834166174" watchObservedRunningTime="2025-11-25 10:30:32.994485472 +0000 UTC m=+3238.847478848"
Nov 25 10:30:40 crc kubenswrapper[4854]: I1125 10:30:40.939802 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:40 crc kubenswrapper[4854]: I1125 10:30:40.940427 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:41 crc kubenswrapper[4854]: I1125 10:30:41.006457 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:41 crc kubenswrapper[4854]: I1125 10:30:41.139612 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pqw9x"
Nov 25 10:30:41 crc kubenswrapper[4854]: I1125 10:30:41.327305 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqw9x"]
Nov 25 10:30:41 crc kubenswrapper[4854]: I1125 10:30:41.387765 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9r85f"]
Nov 25 10:30:41 crc kubenswrapper[4854]: I1125 10:30:41.387975 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9r85f" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="registry-server" containerID="cri-o://7edd164f4e8e5391f94c8e969c1ee496c91b710f7839537662ed5b0c611bfda4" gracePeriod=2
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.110309 4854 generic.go:334] "Generic (PLEG): container finished" podID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerID="7edd164f4e8e5391f94c8e969c1ee496c91b710f7839537662ed5b0c611bfda4" exitCode=0
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.110585 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9r85f" event={"ID":"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28","Type":"ContainerDied","Data":"7edd164f4e8e5391f94c8e969c1ee496c91b710f7839537662ed5b0c611bfda4"}
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.111103 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9r85f" event={"ID":"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28","Type":"ContainerDied","Data":"745fc28b60c85df52e5789e2545286a0324c880ee6ececc18bbab9688f7fc8ca"}
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.111124 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="745fc28b60c85df52e5789e2545286a0324c880ee6ececc18bbab9688f7fc8ca"
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.122359 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9r85f"
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.211354 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-catalog-content\") pod \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") "
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.213611 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-utilities\") pod \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") "
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.214804 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-utilities" (OuterVolumeSpecName: "utilities") pod "41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" (UID: "41d2e6b9-caac-4cfc-b54c-aed1daa2fa28"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.216370 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nflt4\" (UniqueName: \"kubernetes.io/projected/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-kube-api-access-nflt4\") pod \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\" (UID: \"41d2e6b9-caac-4cfc-b54c-aed1daa2fa28\") "
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.220003 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.234722 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-kube-api-access-nflt4" (OuterVolumeSpecName: "kube-api-access-nflt4") pod "41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" (UID: "41d2e6b9-caac-4cfc-b54c-aed1daa2fa28"). InnerVolumeSpecName "kube-api-access-nflt4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.310080 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" (UID: "41d2e6b9-caac-4cfc-b54c-aed1daa2fa28"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.322262 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nflt4\" (UniqueName: \"kubernetes.io/projected/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-kube-api-access-nflt4\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:42 crc kubenswrapper[4854]: I1125 10:30:42.322317 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:43 crc kubenswrapper[4854]: I1125 10:30:43.013362 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:30:43 crc kubenswrapper[4854]: E1125 10:30:43.013977 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:30:43 crc kubenswrapper[4854]: I1125 10:30:43.121275 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9r85f"
Nov 25 10:30:43 crc kubenswrapper[4854]: I1125 10:30:43.149898 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9r85f"]
Nov 25 10:30:43 crc kubenswrapper[4854]: I1125 10:30:43.164404 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9r85f"]
Nov 25 10:30:45 crc kubenswrapper[4854]: I1125 10:30:45.026923 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" path="/var/lib/kubelet/pods/41d2e6b9-caac-4cfc-b54c-aed1daa2fa28/volumes"
Nov 25 10:30:56 crc kubenswrapper[4854]: I1125 10:30:56.013785 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06"
Nov 25 10:30:56 crc kubenswrapper[4854]: I1125 10:30:56.565075 4854 scope.go:117] "RemoveContainer" containerID="572d5ed33214fd568416f6e7ada1d9f381717afb53fb76049eeeec5249e8076f"
Nov 25 10:30:56 crc kubenswrapper[4854]: I1125 10:30:56.600646 4854 scope.go:117] "RemoveContainer" containerID="688680df371a53c6c53376e5208f914e84bf749bb5c249ca9df10fe1ed3c6871"
Nov 25 10:30:56 crc kubenswrapper[4854]: I1125 10:30:56.694076 4854 scope.go:117] "RemoveContainer" containerID="7edd164f4e8e5391f94c8e969c1ee496c91b710f7839537662ed5b0c611bfda4"
Nov 25 10:30:56 crc kubenswrapper[4854]: I1125 10:30:56.746494 4854 scope.go:117] "RemoveContainer" containerID="db3ebebd0f6f6b180664552fabf99e0fe5a037163aca04ecf4d1f59b43b9f038"
Nov 25 10:30:57 crc kubenswrapper[4854]: I1125 10:30:57.275998 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"5a924c88f816080a3b538187dc28d3d522b2b04832b0be1c69f5187e891494c7"}
Nov 25 10:31:19 crc kubenswrapper[4854]: I1125 10:31:19.980882 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zlkr9"]
Nov 25 10:31:19 crc kubenswrapper[4854]: E1125 10:31:19.982048 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="extract-content"
Nov 25 10:31:19 crc kubenswrapper[4854]: I1125 10:31:19.982066 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="extract-content"
Nov 25 10:31:19 crc kubenswrapper[4854]: E1125 10:31:19.982085 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="registry-server"
Nov 25 10:31:19 crc kubenswrapper[4854]: I1125 10:31:19.982091 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="registry-server"
Nov 25 10:31:19 crc kubenswrapper[4854]: E1125 10:31:19.982119 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="extract-utilities"
Nov 25 10:31:19 crc kubenswrapper[4854]: I1125 10:31:19.982127 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="extract-utilities"
Nov 25 10:31:19 crc kubenswrapper[4854]: I1125 10:31:19.982393 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="41d2e6b9-caac-4cfc-b54c-aed1daa2fa28" containerName="registry-server"
Nov 25 10:31:19 crc kubenswrapper[4854]: I1125 10:31:19.984437 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:19 crc kubenswrapper[4854]: I1125 10:31:19.993145 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zlkr9"]
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.098206 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8xgh\" (UniqueName: \"kubernetes.io/projected/59a0201a-b799-4dce-8075-676e19effa70-kube-api-access-b8xgh\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.098337 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-catalog-content\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.098370 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-utilities\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.201468 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8xgh\" (UniqueName: \"kubernetes.io/projected/59a0201a-b799-4dce-8075-676e19effa70-kube-api-access-b8xgh\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.201567 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-catalog-content\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.201597 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-utilities\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.202098 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-utilities\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.202574 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-catalog-content\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.222448 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8xgh\" (UniqueName: \"kubernetes.io/projected/59a0201a-b799-4dce-8075-676e19effa70-kube-api-access-b8xgh\") pod \"community-operators-zlkr9\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:20 crc kubenswrapper[4854]: I1125 10:31:20.309720 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:21 crc kubenswrapper[4854]: I1125 10:31:21.605907 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zlkr9"]
Nov 25 10:31:21 crc kubenswrapper[4854]: I1125 10:31:21.902162 4854 generic.go:334] "Generic (PLEG): container finished" podID="59a0201a-b799-4dce-8075-676e19effa70" containerID="852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0" exitCode=0
Nov 25 10:31:21 crc kubenswrapper[4854]: I1125 10:31:21.902276 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlkr9" event={"ID":"59a0201a-b799-4dce-8075-676e19effa70","Type":"ContainerDied","Data":"852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0"}
Nov 25 10:31:21 crc kubenswrapper[4854]: I1125 10:31:21.902456 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlkr9" event={"ID":"59a0201a-b799-4dce-8075-676e19effa70","Type":"ContainerStarted","Data":"a85e972ceccf2e3e9a9eba643991a1cb989079dcd2f06de8380302733922e035"}
Nov 25 10:31:23 crc kubenswrapper[4854]: I1125 10:31:23.923821 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlkr9" event={"ID":"59a0201a-b799-4dce-8075-676e19effa70","Type":"ContainerStarted","Data":"d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50"}
Nov 25 10:31:26 crc kubenswrapper[4854]: I1125 10:31:26.959998 4854 generic.go:334] "Generic (PLEG): container finished" podID="59a0201a-b799-4dce-8075-676e19effa70" containerID="d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50" exitCode=0
Nov 25 10:31:26 crc kubenswrapper[4854]: I1125 10:31:26.960828 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlkr9" event={"ID":"59a0201a-b799-4dce-8075-676e19effa70","Type":"ContainerDied","Data":"d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50"}
Nov 25 10:31:27 crc kubenswrapper[4854]: I1125 10:31:27.973128 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlkr9" event={"ID":"59a0201a-b799-4dce-8075-676e19effa70","Type":"ContainerStarted","Data":"5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34"}
Nov 25 10:31:28 crc kubenswrapper[4854]: I1125 10:31:28.006774 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zlkr9" podStartSLOduration=3.482150954 podStartE2EDuration="9.006747549s" podCreationTimestamp="2025-11-25 10:31:19 +0000 UTC" firstStartedPulling="2025-11-25 10:31:21.904306169 +0000 UTC m=+3287.757299535" lastFinishedPulling="2025-11-25 10:31:27.428902754 +0000 UTC m=+3293.281896130" observedRunningTime="2025-11-25 10:31:27.995047729 +0000 UTC m=+3293.848041115" watchObservedRunningTime="2025-11-25 10:31:28.006747549 +0000 UTC m=+3293.859740935"
Nov 25 10:31:30 crc kubenswrapper[4854]: I1125 10:31:30.310508 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:30 crc kubenswrapper[4854]: I1125 10:31:30.311016 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zlkr9"
Nov 25 10:31:30 crc kubenswrapper[4854]: I1125 10:31:30.369143 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zlkr9"
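[editor's note] The probe lines above show the gate: the startup probe flips from "unhealthy" to "started", and only afterwards does readiness move from "" to "ready" (at 10:31:40 below). In kubelet semantics, readiness results are not acted on until the startup probe has succeeded once. A minimal model of that gating, illustrative only:

# Minimal model of probe gating: readiness is ignored until the startup
# probe has succeeded. Not kubelet code.
class ProbeGate:
    def __init__(self):
        self.started = False
        self.ready = False

    def on_startup(self, healthy):
        self.started = self.started or healthy

    def on_readiness(self, healthy):
        if self.started:                 # readiness ignored until "started"
            self.ready = healthy

g = ProbeGate()
g.on_readiness(True); print(g.ready)                 # False: not started yet
g.on_startup(True); g.on_readiness(True); print(g.ready)  # True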
pod="openshift-marketplace/community-operators-zlkr9" Nov 25 10:31:40 crc kubenswrapper[4854]: I1125 10:31:40.382233 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zlkr9" Nov 25 10:31:40 crc kubenswrapper[4854]: I1125 10:31:40.436432 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zlkr9"] Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.153280 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zlkr9" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="registry-server" containerID="cri-o://5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34" gracePeriod=2 Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.719234 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zlkr9" Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.773481 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-catalog-content\") pod \"59a0201a-b799-4dce-8075-676e19effa70\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.773819 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8xgh\" (UniqueName: \"kubernetes.io/projected/59a0201a-b799-4dce-8075-676e19effa70-kube-api-access-b8xgh\") pod \"59a0201a-b799-4dce-8075-676e19effa70\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.773882 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-utilities\") pod \"59a0201a-b799-4dce-8075-676e19effa70\" (UID: \"59a0201a-b799-4dce-8075-676e19effa70\") " Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.775136 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-utilities" (OuterVolumeSpecName: "utilities") pod "59a0201a-b799-4dce-8075-676e19effa70" (UID: "59a0201a-b799-4dce-8075-676e19effa70"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.781623 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59a0201a-b799-4dce-8075-676e19effa70-kube-api-access-b8xgh" (OuterVolumeSpecName: "kube-api-access-b8xgh") pod "59a0201a-b799-4dce-8075-676e19effa70" (UID: "59a0201a-b799-4dce-8075-676e19effa70"). InnerVolumeSpecName "kube-api-access-b8xgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.822339 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59a0201a-b799-4dce-8075-676e19effa70" (UID: "59a0201a-b799-4dce-8075-676e19effa70"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.877209 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.877248 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8xgh\" (UniqueName: \"kubernetes.io/projected/59a0201a-b799-4dce-8075-676e19effa70-kube-api-access-b8xgh\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:41 crc kubenswrapper[4854]: I1125 10:31:41.877260 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59a0201a-b799-4dce-8075-676e19effa70-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.169697 4854 generic.go:334] "Generic (PLEG): container finished" podID="59a0201a-b799-4dce-8075-676e19effa70" containerID="5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34" exitCode=0 Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.169783 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zlkr9" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.169840 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlkr9" event={"ID":"59a0201a-b799-4dce-8075-676e19effa70","Type":"ContainerDied","Data":"5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34"} Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.170165 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zlkr9" event={"ID":"59a0201a-b799-4dce-8075-676e19effa70","Type":"ContainerDied","Data":"a85e972ceccf2e3e9a9eba643991a1cb989079dcd2f06de8380302733922e035"} Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.170199 4854 scope.go:117] "RemoveContainer" containerID="5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.207701 4854 scope.go:117] "RemoveContainer" containerID="d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.221643 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zlkr9"] Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.240452 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zlkr9"] Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.241932 4854 scope.go:117] "RemoveContainer" containerID="852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.294216 4854 scope.go:117] "RemoveContainer" containerID="5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34" Nov 25 10:31:42 crc kubenswrapper[4854]: E1125 10:31:42.294971 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34\": container with ID starting with 5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34 not found: ID does not exist" containerID="5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.295027 
4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34"} err="failed to get container status \"5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34\": rpc error: code = NotFound desc = could not find container \"5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34\": container with ID starting with 5179becda340ee76d2337b9cd320fac5a7c9aa6a9434addec7ce1c3ec6cddf34 not found: ID does not exist" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.295061 4854 scope.go:117] "RemoveContainer" containerID="d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50" Nov 25 10:31:42 crc kubenswrapper[4854]: E1125 10:31:42.295374 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50\": container with ID starting with d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50 not found: ID does not exist" containerID="d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.295398 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50"} err="failed to get container status \"d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50\": rpc error: code = NotFound desc = could not find container \"d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50\": container with ID starting with d86f5f8b0c7d8587951c217b5503698365f4bfb7d41bcd8adc35d41e1296ad50 not found: ID does not exist" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.295414 4854 scope.go:117] "RemoveContainer" containerID="852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0" Nov 25 10:31:42 crc kubenswrapper[4854]: E1125 10:31:42.295721 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0\": container with ID starting with 852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0 not found: ID does not exist" containerID="852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0" Nov 25 10:31:42 crc kubenswrapper[4854]: I1125 10:31:42.295779 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0"} err="failed to get container status \"852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0\": rpc error: code = NotFound desc = could not find container \"852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0\": container with ID starting with 852c487a647f10d1ebe14f48bd7f673e18be74f1716afe2089775a149b01bfd0 not found: ID does not exist" Nov 25 10:31:43 crc kubenswrapper[4854]: I1125 10:31:43.031480 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59a0201a-b799-4dce-8075-676e19effa70" path="/var/lib/kubelet/pods/59a0201a-b799-4dce-8075-676e19effa70/volumes" Nov 25 10:33:25 crc kubenswrapper[4854]: I1125 10:33:25.028405 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:33:25 crc kubenswrapper[4854]: I1125 10:33:25.029019 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:33:55 crc kubenswrapper[4854]: I1125 10:33:55.029241 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:33:55 crc kubenswrapper[4854]: I1125 10:33:55.029658 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:34:25 crc kubenswrapper[4854]: I1125 10:34:25.028560 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:34:25 crc kubenswrapper[4854]: I1125 10:34:25.029245 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:34:25 crc kubenswrapper[4854]: I1125 10:34:25.029316 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:34:25 crc kubenswrapper[4854]: I1125 10:34:25.030496 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5a924c88f816080a3b538187dc28d3d522b2b04832b0be1c69f5187e891494c7"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:34:25 crc kubenswrapper[4854]: I1125 10:34:25.030593 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://5a924c88f816080a3b538187dc28d3d522b2b04832b0be1c69f5187e891494c7" gracePeriod=600 Nov 25 10:34:26 crc kubenswrapper[4854]: I1125 10:34:26.068301 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="5a924c88f816080a3b538187dc28d3d522b2b04832b0be1c69f5187e891494c7" exitCode=0 Nov 25 10:34:26 crc kubenswrapper[4854]: I1125 10:34:26.068827 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" 
event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"5a924c88f816080a3b538187dc28d3d522b2b04832b0be1c69f5187e891494c7"} Nov 25 10:34:26 crc kubenswrapper[4854]: I1125 10:34:26.068856 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e"} Nov 25 10:34:26 crc kubenswrapper[4854]: I1125 10:34:26.068872 4854 scope.go:117] "RemoveContainer" containerID="047d06823f958a969b84e35dd1c97a09fe47f798d94dae3729c5fe72dd409d06" Nov 25 10:36:25 crc kubenswrapper[4854]: I1125 10:36:25.029328 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:36:25 crc kubenswrapper[4854]: I1125 10:36:25.029798 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:36:55 crc kubenswrapper[4854]: I1125 10:36:55.028885 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:36:55 crc kubenswrapper[4854]: I1125 10:36:55.029354 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.029422 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.030183 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.033500 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.034556 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 
10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.034648 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" gracePeriod=600 Nov 25 10:37:25 crc kubenswrapper[4854]: E1125 10:37:25.164828 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.197850 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" exitCode=0 Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.197926 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e"} Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.197979 4854 scope.go:117] "RemoveContainer" containerID="5a924c88f816080a3b538187dc28d3d522b2b04832b0be1c69f5187e891494c7" Nov 25 10:37:25 crc kubenswrapper[4854]: I1125 10:37:25.199176 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:37:25 crc kubenswrapper[4854]: E1125 10:37:25.200018 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:37:39 crc kubenswrapper[4854]: I1125 10:37:39.014319 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:37:39 crc kubenswrapper[4854]: E1125 10:37:39.015207 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:37:44 crc kubenswrapper[4854]: I1125 10:37:44.406571 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-5dbbcc5579-hmqdg" podUID="755d55c2-0eaa-4186-bd25-00e8c34166be" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 25 10:37:53 crc kubenswrapper[4854]: I1125 10:37:53.013290 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:37:53 crc kubenswrapper[4854]: E1125 10:37:53.014409 4854 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:38:05 crc kubenswrapper[4854]: I1125 10:38:05.022036 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:38:05 crc kubenswrapper[4854]: E1125 10:38:05.023008 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:38:16 crc kubenswrapper[4854]: I1125 10:38:16.014595 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:38:16 crc kubenswrapper[4854]: E1125 10:38:16.015600 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:38:21 crc kubenswrapper[4854]: I1125 10:38:21.959643 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4d4vw"] Nov 25 10:38:21 crc kubenswrapper[4854]: E1125 10:38:21.960767 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="extract-content" Nov 25 10:38:21 crc kubenswrapper[4854]: I1125 10:38:21.960785 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="extract-content" Nov 25 10:38:21 crc kubenswrapper[4854]: E1125 10:38:21.960800 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="extract-utilities" Nov 25 10:38:21 crc kubenswrapper[4854]: I1125 10:38:21.960809 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="extract-utilities" Nov 25 10:38:21 crc kubenswrapper[4854]: E1125 10:38:21.960830 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="registry-server" Nov 25 10:38:21 crc kubenswrapper[4854]: I1125 10:38:21.960838 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="registry-server" Nov 25 10:38:21 crc kubenswrapper[4854]: I1125 10:38:21.961124 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="59a0201a-b799-4dce-8075-676e19effa70" containerName="registry-server" Nov 25 10:38:21 crc kubenswrapper[4854]: I1125 10:38:21.963335 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:21 crc kubenswrapper[4854]: I1125 10:38:21.986441 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4d4vw"] Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.038376 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-catalog-content\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.038446 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-utilities\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.038522 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7jhw\" (UniqueName: \"kubernetes.io/projected/5b3b698a-f650-4b87-9381-a9b23f7a6899-kube-api-access-t7jhw\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.142011 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-catalog-content\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.142169 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-utilities\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.142280 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7jhw\" (UniqueName: \"kubernetes.io/projected/5b3b698a-f650-4b87-9381-a9b23f7a6899-kube-api-access-t7jhw\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.142412 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-catalog-content\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.142460 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-utilities\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.166049 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-t7jhw\" (UniqueName: \"kubernetes.io/projected/5b3b698a-f650-4b87-9381-a9b23f7a6899-kube-api-access-t7jhw\") pod \"certified-operators-4d4vw\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.302770 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:22 crc kubenswrapper[4854]: I1125 10:38:22.947612 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4d4vw"] Nov 25 10:38:23 crc kubenswrapper[4854]: I1125 10:38:23.922909 4854 generic.go:334] "Generic (PLEG): container finished" podID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerID="81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992" exitCode=0 Nov 25 10:38:23 crc kubenswrapper[4854]: I1125 10:38:23.923239 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4d4vw" event={"ID":"5b3b698a-f650-4b87-9381-a9b23f7a6899","Type":"ContainerDied","Data":"81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992"} Nov 25 10:38:23 crc kubenswrapper[4854]: I1125 10:38:23.923271 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4d4vw" event={"ID":"5b3b698a-f650-4b87-9381-a9b23f7a6899","Type":"ContainerStarted","Data":"92f77c6a7d815562cf801c863839711c035ba48f644f890cbf139a2854f67fd6"} Nov 25 10:38:23 crc kubenswrapper[4854]: I1125 10:38:23.931262 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:38:25 crc kubenswrapper[4854]: I1125 10:38:25.962518 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4d4vw" event={"ID":"5b3b698a-f650-4b87-9381-a9b23f7a6899","Type":"ContainerStarted","Data":"e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee"} Nov 25 10:38:27 crc kubenswrapper[4854]: I1125 10:38:27.014259 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:38:27 crc kubenswrapper[4854]: E1125 10:38:27.014980 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:38:28 crc kubenswrapper[4854]: I1125 10:38:28.992908 4854 generic.go:334] "Generic (PLEG): container finished" podID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerID="e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee" exitCode=0 Nov 25 10:38:28 crc kubenswrapper[4854]: I1125 10:38:28.992991 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4d4vw" event={"ID":"5b3b698a-f650-4b87-9381-a9b23f7a6899","Type":"ContainerDied","Data":"e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee"} Nov 25 10:38:30 crc kubenswrapper[4854]: I1125 10:38:30.006934 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4d4vw" 
event={"ID":"5b3b698a-f650-4b87-9381-a9b23f7a6899","Type":"ContainerStarted","Data":"e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50"} Nov 25 10:38:30 crc kubenswrapper[4854]: I1125 10:38:30.037924 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4d4vw" podStartSLOduration=3.525755693 podStartE2EDuration="9.037898407s" podCreationTimestamp="2025-11-25 10:38:21 +0000 UTC" firstStartedPulling="2025-11-25 10:38:23.930986134 +0000 UTC m=+3709.783979520" lastFinishedPulling="2025-11-25 10:38:29.443128858 +0000 UTC m=+3715.296122234" observedRunningTime="2025-11-25 10:38:30.027861832 +0000 UTC m=+3715.880855228" watchObservedRunningTime="2025-11-25 10:38:30.037898407 +0000 UTC m=+3715.890891783" Nov 25 10:38:32 crc kubenswrapper[4854]: I1125 10:38:32.303334 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:32 crc kubenswrapper[4854]: I1125 10:38:32.304813 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:33 crc kubenswrapper[4854]: I1125 10:38:33.386317 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-4d4vw" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="registry-server" probeResult="failure" output=< Nov 25 10:38:33 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 10:38:33 crc kubenswrapper[4854]: > Nov 25 10:38:40 crc kubenswrapper[4854]: I1125 10:38:40.014082 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:38:40 crc kubenswrapper[4854]: E1125 10:38:40.015070 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:38:42 crc kubenswrapper[4854]: I1125 10:38:42.397028 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:42 crc kubenswrapper[4854]: I1125 10:38:42.457339 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:42 crc kubenswrapper[4854]: I1125 10:38:42.650376 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4d4vw"] Nov 25 10:38:44 crc kubenswrapper[4854]: I1125 10:38:44.169172 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4d4vw" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="registry-server" containerID="cri-o://e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50" gracePeriod=2 Nov 25 10:38:44 crc kubenswrapper[4854]: I1125 10:38:44.860045 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.029396 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-catalog-content\") pod \"5b3b698a-f650-4b87-9381-a9b23f7a6899\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.029555 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-utilities\") pod \"5b3b698a-f650-4b87-9381-a9b23f7a6899\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.029861 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7jhw\" (UniqueName: \"kubernetes.io/projected/5b3b698a-f650-4b87-9381-a9b23f7a6899-kube-api-access-t7jhw\") pod \"5b3b698a-f650-4b87-9381-a9b23f7a6899\" (UID: \"5b3b698a-f650-4b87-9381-a9b23f7a6899\") " Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.030605 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-utilities" (OuterVolumeSpecName: "utilities") pod "5b3b698a-f650-4b87-9381-a9b23f7a6899" (UID: "5b3b698a-f650-4b87-9381-a9b23f7a6899"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.042668 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b3b698a-f650-4b87-9381-a9b23f7a6899-kube-api-access-t7jhw" (OuterVolumeSpecName: "kube-api-access-t7jhw") pod "5b3b698a-f650-4b87-9381-a9b23f7a6899" (UID: "5b3b698a-f650-4b87-9381-a9b23f7a6899"). InnerVolumeSpecName "kube-api-access-t7jhw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.093293 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b3b698a-f650-4b87-9381-a9b23f7a6899" (UID: "5b3b698a-f650-4b87-9381-a9b23f7a6899"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.133460 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.133495 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b3b698a-f650-4b87-9381-a9b23f7a6899-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.133508 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7jhw\" (UniqueName: \"kubernetes.io/projected/5b3b698a-f650-4b87-9381-a9b23f7a6899-kube-api-access-t7jhw\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.181574 4854 generic.go:334] "Generic (PLEG): container finished" podID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerID="e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50" exitCode=0 Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.181619 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4d4vw" event={"ID":"5b3b698a-f650-4b87-9381-a9b23f7a6899","Type":"ContainerDied","Data":"e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50"} Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.181649 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4d4vw" event={"ID":"5b3b698a-f650-4b87-9381-a9b23f7a6899","Type":"ContainerDied","Data":"92f77c6a7d815562cf801c863839711c035ba48f644f890cbf139a2854f67fd6"} Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.181656 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4d4vw" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.181664 4854 scope.go:117] "RemoveContainer" containerID="e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.222699 4854 scope.go:117] "RemoveContainer" containerID="e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.230508 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4d4vw"] Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.240196 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4d4vw"] Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.248972 4854 scope.go:117] "RemoveContainer" containerID="81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.308821 4854 scope.go:117] "RemoveContainer" containerID="e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50" Nov 25 10:38:45 crc kubenswrapper[4854]: E1125 10:38:45.309486 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50\": container with ID starting with e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50 not found: ID does not exist" containerID="e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.309564 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50"} err="failed to get container status \"e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50\": rpc error: code = NotFound desc = could not find container \"e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50\": container with ID starting with e200e5fea0fb94fa20763df2dd94a968a67b707242aba78c884d0ff7dddafb50 not found: ID does not exist" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.309626 4854 scope.go:117] "RemoveContainer" containerID="e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee" Nov 25 10:38:45 crc kubenswrapper[4854]: E1125 10:38:45.310103 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee\": container with ID starting with e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee not found: ID does not exist" containerID="e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.310135 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee"} err="failed to get container status \"e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee\": rpc error: code = NotFound desc = could not find container \"e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee\": container with ID starting with e96b7fdd1217994885a2458089338b145bd62ddf48e4fb940f4f51fb948164ee not found: ID does not exist" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.310156 4854 scope.go:117] "RemoveContainer" 
containerID="81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992" Nov 25 10:38:45 crc kubenswrapper[4854]: E1125 10:38:45.310568 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992\": container with ID starting with 81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992 not found: ID does not exist" containerID="81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992" Nov 25 10:38:45 crc kubenswrapper[4854]: I1125 10:38:45.310594 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992"} err="failed to get container status \"81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992\": rpc error: code = NotFound desc = could not find container \"81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992\": container with ID starting with 81d9c0dc8dcb8ec6633ee7cd2163d4d8e83a0ad83db961b81d77ad64b8ba3992 not found: ID does not exist" Nov 25 10:38:47 crc kubenswrapper[4854]: I1125 10:38:47.026265 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" path="/var/lib/kubelet/pods/5b3b698a-f650-4b87-9381-a9b23f7a6899/volumes" Nov 25 10:38:55 crc kubenswrapper[4854]: I1125 10:38:55.023512 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:38:55 crc kubenswrapper[4854]: E1125 10:38:55.024471 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:39:09 crc kubenswrapper[4854]: I1125 10:39:09.014083 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:39:09 crc kubenswrapper[4854]: E1125 10:39:09.015307 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:39:23 crc kubenswrapper[4854]: I1125 10:39:23.014661 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:39:23 crc kubenswrapper[4854]: E1125 10:39:23.015481 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:39:36 crc kubenswrapper[4854]: I1125 10:39:36.013893 4854 scope.go:117] "RemoveContainer" 
containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:39:36 crc kubenswrapper[4854]: E1125 10:39:36.014630 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:39:50 crc kubenswrapper[4854]: I1125 10:39:50.015153 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:39:50 crc kubenswrapper[4854]: E1125 10:39:50.016357 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:40:03 crc kubenswrapper[4854]: I1125 10:40:03.013812 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:40:03 crc kubenswrapper[4854]: E1125 10:40:03.014912 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:40:18 crc kubenswrapper[4854]: I1125 10:40:18.015002 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:40:18 crc kubenswrapper[4854]: E1125 10:40:18.016093 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.895611 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7rns4"] Nov 25 10:40:20 crc kubenswrapper[4854]: E1125 10:40:20.898874 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="extract-content" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.899002 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="extract-content" Nov 25 10:40:20 crc kubenswrapper[4854]: E1125 10:40:20.899155 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="registry-server" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.899229 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="registry-server" Nov 
25 10:40:20 crc kubenswrapper[4854]: E1125 10:40:20.899328 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="extract-utilities" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.899448 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="extract-utilities" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.899849 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b3b698a-f650-4b87-9381-a9b23f7a6899" containerName="registry-server" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.902330 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.976797 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-utilities\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.979027 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g98vh\" (UniqueName: \"kubernetes.io/projected/7c02607d-5d74-4f4f-92c8-0aa57451a563-kube-api-access-g98vh\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.979281 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-catalog-content\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:20 crc kubenswrapper[4854]: I1125 10:40:20.995344 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7rns4"] Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.082185 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-utilities\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.082277 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g98vh\" (UniqueName: \"kubernetes.io/projected/7c02607d-5d74-4f4f-92c8-0aa57451a563-kube-api-access-g98vh\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.082327 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-catalog-content\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.082900 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-catalog-content\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.083024 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-utilities\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.105811 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g98vh\" (UniqueName: \"kubernetes.io/projected/7c02607d-5d74-4f4f-92c8-0aa57451a563-kube-api-access-g98vh\") pod \"redhat-operators-7rns4\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.309699 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:21 crc kubenswrapper[4854]: I1125 10:40:21.840000 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7rns4"] Nov 25 10:40:22 crc kubenswrapper[4854]: I1125 10:40:22.420646 4854 generic.go:334] "Generic (PLEG): container finished" podID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerID="c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe" exitCode=0 Nov 25 10:40:22 crc kubenswrapper[4854]: I1125 10:40:22.420744 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7rns4" event={"ID":"7c02607d-5d74-4f4f-92c8-0aa57451a563","Type":"ContainerDied","Data":"c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe"} Nov 25 10:40:22 crc kubenswrapper[4854]: I1125 10:40:22.420991 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7rns4" event={"ID":"7c02607d-5d74-4f4f-92c8-0aa57451a563","Type":"ContainerStarted","Data":"3ef9e4479e5c0ce717ac9bdbc481f30438cdfcb623537ff7c5cfec7ee2759900"} Nov 25 10:40:23 crc kubenswrapper[4854]: I1125 10:40:23.436470 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7rns4" event={"ID":"7c02607d-5d74-4f4f-92c8-0aa57451a563","Type":"ContainerStarted","Data":"5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903"} Nov 25 10:40:27 crc kubenswrapper[4854]: I1125 10:40:27.489262 4854 generic.go:334] "Generic (PLEG): container finished" podID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerID="5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903" exitCode=0 Nov 25 10:40:27 crc kubenswrapper[4854]: I1125 10:40:27.489372 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7rns4" event={"ID":"7c02607d-5d74-4f4f-92c8-0aa57451a563","Type":"ContainerDied","Data":"5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903"} Nov 25 10:40:28 crc kubenswrapper[4854]: I1125 10:40:28.510457 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7rns4" event={"ID":"7c02607d-5d74-4f4f-92c8-0aa57451a563","Type":"ContainerStarted","Data":"37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590"} Nov 25 10:40:28 crc kubenswrapper[4854]: I1125 10:40:28.532401 4854 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7rns4" podStartSLOduration=3.068010821 podStartE2EDuration="8.532381487s" podCreationTimestamp="2025-11-25 10:40:20 +0000 UTC" firstStartedPulling="2025-11-25 10:40:22.424001755 +0000 UTC m=+3828.276995131" lastFinishedPulling="2025-11-25 10:40:27.888372401 +0000 UTC m=+3833.741365797" observedRunningTime="2025-11-25 10:40:28.527974247 +0000 UTC m=+3834.380967643" watchObservedRunningTime="2025-11-25 10:40:28.532381487 +0000 UTC m=+3834.385374863" Nov 25 10:40:31 crc kubenswrapper[4854]: I1125 10:40:31.013373 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:40:31 crc kubenswrapper[4854]: E1125 10:40:31.015163 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:40:31 crc kubenswrapper[4854]: I1125 10:40:31.310024 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:31 crc kubenswrapper[4854]: I1125 10:40:31.310654 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:32 crc kubenswrapper[4854]: I1125 10:40:32.391894 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7rns4" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="registry-server" probeResult="failure" output=< Nov 25 10:40:32 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 10:40:32 crc kubenswrapper[4854]: > Nov 25 10:40:41 crc kubenswrapper[4854]: I1125 10:40:41.375163 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:41 crc kubenswrapper[4854]: I1125 10:40:41.455916 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:41 crc kubenswrapper[4854]: I1125 10:40:41.638371 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7rns4"] Nov 25 10:40:42 crc kubenswrapper[4854]: I1125 10:40:42.702727 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7rns4" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="registry-server" containerID="cri-o://37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590" gracePeriod=2 Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.385827 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.450621 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-catalog-content\") pod \"7c02607d-5d74-4f4f-92c8-0aa57451a563\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.450668 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-utilities\") pod \"7c02607d-5d74-4f4f-92c8-0aa57451a563\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.450777 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g98vh\" (UniqueName: \"kubernetes.io/projected/7c02607d-5d74-4f4f-92c8-0aa57451a563-kube-api-access-g98vh\") pod \"7c02607d-5d74-4f4f-92c8-0aa57451a563\" (UID: \"7c02607d-5d74-4f4f-92c8-0aa57451a563\") " Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.451540 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-utilities" (OuterVolumeSpecName: "utilities") pod "7c02607d-5d74-4f4f-92c8-0aa57451a563" (UID: "7c02607d-5d74-4f4f-92c8-0aa57451a563"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.452018 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.458336 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c02607d-5d74-4f4f-92c8-0aa57451a563-kube-api-access-g98vh" (OuterVolumeSpecName: "kube-api-access-g98vh") pod "7c02607d-5d74-4f4f-92c8-0aa57451a563" (UID: "7c02607d-5d74-4f4f-92c8-0aa57451a563"). InnerVolumeSpecName "kube-api-access-g98vh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.542259 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7c02607d-5d74-4f4f-92c8-0aa57451a563" (UID: "7c02607d-5d74-4f4f-92c8-0aa57451a563"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.554364 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c02607d-5d74-4f4f-92c8-0aa57451a563-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.554410 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g98vh\" (UniqueName: \"kubernetes.io/projected/7c02607d-5d74-4f4f-92c8-0aa57451a563-kube-api-access-g98vh\") on node \"crc\" DevicePath \"\"" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.719079 4854 generic.go:334] "Generic (PLEG): container finished" podID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerID="37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590" exitCode=0 Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.719117 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7rns4" event={"ID":"7c02607d-5d74-4f4f-92c8-0aa57451a563","Type":"ContainerDied","Data":"37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590"} Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.719450 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7rns4" event={"ID":"7c02607d-5d74-4f4f-92c8-0aa57451a563","Type":"ContainerDied","Data":"3ef9e4479e5c0ce717ac9bdbc481f30438cdfcb623537ff7c5cfec7ee2759900"} Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.719470 4854 scope.go:117] "RemoveContainer" containerID="37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.719172 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7rns4" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.756179 4854 scope.go:117] "RemoveContainer" containerID="5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.767239 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7rns4"] Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.782553 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7rns4"] Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.799046 4854 scope.go:117] "RemoveContainer" containerID="c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.867399 4854 scope.go:117] "RemoveContainer" containerID="37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590" Nov 25 10:40:43 crc kubenswrapper[4854]: E1125 10:40:43.867872 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590\": container with ID starting with 37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590 not found: ID does not exist" containerID="37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.867933 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590"} err="failed to get container status \"37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590\": rpc error: code = NotFound desc = could not find container \"37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590\": container with ID starting with 37c346ba44070af50329c728a8adf27a51d761a9208711c94b5eba54a4884590 not found: ID does not exist" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.867963 4854 scope.go:117] "RemoveContainer" containerID="5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903" Nov 25 10:40:43 crc kubenswrapper[4854]: E1125 10:40:43.868345 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903\": container with ID starting with 5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903 not found: ID does not exist" containerID="5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.868387 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903"} err="failed to get container status \"5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903\": rpc error: code = NotFound desc = could not find container \"5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903\": container with ID starting with 5d61ab77f06916ce0422201b065cfdc3567b03ee156944a140928ae4b324a903 not found: ID does not exist" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.868416 4854 scope.go:117] "RemoveContainer" containerID="c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe" Nov 25 10:40:43 crc kubenswrapper[4854]: E1125 10:40:43.868846 4854 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe\": container with ID starting with c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe not found: ID does not exist" containerID="c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe" Nov 25 10:40:43 crc kubenswrapper[4854]: I1125 10:40:43.868884 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe"} err="failed to get container status \"c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe\": rpc error: code = NotFound desc = could not find container \"c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe\": container with ID starting with c0e5a3acf98fa95fd6b1b7c15f77d80200724e9b2e3b33491a300e82c54743fe not found: ID does not exist" Nov 25 10:40:44 crc kubenswrapper[4854]: I1125 10:40:44.013502 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:40:44 crc kubenswrapper[4854]: E1125 10:40:44.014074 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:40:45 crc kubenswrapper[4854]: I1125 10:40:45.048213 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" path="/var/lib/kubelet/pods/7c02607d-5d74-4f4f-92c8-0aa57451a563/volumes" Nov 25 10:40:59 crc kubenswrapper[4854]: I1125 10:40:59.013999 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:40:59 crc kubenswrapper[4854]: E1125 10:40:59.015033 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:41:14 crc kubenswrapper[4854]: I1125 10:41:14.014214 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:41:14 crc kubenswrapper[4854]: E1125 10:41:14.015127 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:41:25 crc kubenswrapper[4854]: I1125 10:41:25.983523 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r7bnc"] Nov 25 10:41:25 crc kubenswrapper[4854]: E1125 10:41:25.984521 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" 
containerName="extract-utilities" Nov 25 10:41:25 crc kubenswrapper[4854]: I1125 10:41:25.984536 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="extract-utilities" Nov 25 10:41:25 crc kubenswrapper[4854]: E1125 10:41:25.984574 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="registry-server" Nov 25 10:41:25 crc kubenswrapper[4854]: I1125 10:41:25.984580 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="registry-server" Nov 25 10:41:25 crc kubenswrapper[4854]: E1125 10:41:25.984610 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="extract-content" Nov 25 10:41:25 crc kubenswrapper[4854]: I1125 10:41:25.984616 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="extract-content" Nov 25 10:41:25 crc kubenswrapper[4854]: I1125 10:41:25.984894 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c02607d-5d74-4f4f-92c8-0aa57451a563" containerName="registry-server" Nov 25 10:41:25 crc kubenswrapper[4854]: I1125 10:41:25.986702 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:25 crc kubenswrapper[4854]: I1125 10:41:25.999494 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7bnc"] Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.151588 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-catalog-content\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.151772 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-utilities\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.152557 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbnr4\" (UniqueName: \"kubernetes.io/projected/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-kube-api-access-qbnr4\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.255578 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-catalog-content\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.255636 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-utilities\") pod \"community-operators-r7bnc\" (UID: 
\"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.255762 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbnr4\" (UniqueName: \"kubernetes.io/projected/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-kube-api-access-qbnr4\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.256109 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-catalog-content\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.256298 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-utilities\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.276316 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbnr4\" (UniqueName: \"kubernetes.io/projected/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-kube-api-access-qbnr4\") pod \"community-operators-r7bnc\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.312405 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:26 crc kubenswrapper[4854]: I1125 10:41:26.941595 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7bnc"] Nov 25 10:41:27 crc kubenswrapper[4854]: I1125 10:41:27.319081 4854 generic.go:334] "Generic (PLEG): container finished" podID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerID="f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99" exitCode=0 Nov 25 10:41:27 crc kubenswrapper[4854]: I1125 10:41:27.319134 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7bnc" event={"ID":"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4","Type":"ContainerDied","Data":"f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99"} Nov 25 10:41:27 crc kubenswrapper[4854]: I1125 10:41:27.320164 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7bnc" event={"ID":"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4","Type":"ContainerStarted","Data":"305d170cb1d823d162554f6b50f94b9539f8887c2e338c8d0bab843a688747d9"} Nov 25 10:41:29 crc kubenswrapper[4854]: I1125 10:41:29.014909 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:41:29 crc kubenswrapper[4854]: E1125 10:41:29.016074 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:41:29 crc kubenswrapper[4854]: I1125 10:41:29.356581 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7bnc" event={"ID":"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4","Type":"ContainerStarted","Data":"3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03"} Nov 25 10:41:30 crc kubenswrapper[4854]: I1125 10:41:30.371901 4854 generic.go:334] "Generic (PLEG): container finished" podID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerID="3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03" exitCode=0 Nov 25 10:41:30 crc kubenswrapper[4854]: I1125 10:41:30.371998 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7bnc" event={"ID":"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4","Type":"ContainerDied","Data":"3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03"} Nov 25 10:41:32 crc kubenswrapper[4854]: I1125 10:41:32.407601 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7bnc" event={"ID":"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4","Type":"ContainerStarted","Data":"a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717"} Nov 25 10:41:32 crc kubenswrapper[4854]: I1125 10:41:32.449400 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r7bnc" podStartSLOduration=3.013464102 podStartE2EDuration="7.449373521s" podCreationTimestamp="2025-11-25 10:41:25 +0000 UTC" firstStartedPulling="2025-11-25 10:41:27.32206678 +0000 UTC m=+3893.175060156" lastFinishedPulling="2025-11-25 10:41:31.757976199 +0000 UTC m=+3897.610969575" observedRunningTime="2025-11-25 
10:41:32.433838157 +0000 UTC m=+3898.286831613" watchObservedRunningTime="2025-11-25 10:41:32.449373521 +0000 UTC m=+3898.302366907" Nov 25 10:41:36 crc kubenswrapper[4854]: I1125 10:41:36.313330 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:36 crc kubenswrapper[4854]: I1125 10:41:36.313918 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:36 crc kubenswrapper[4854]: I1125 10:41:36.396815 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:44 crc kubenswrapper[4854]: I1125 10:41:44.014568 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:41:44 crc kubenswrapper[4854]: E1125 10:41:44.015386 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:41:46 crc kubenswrapper[4854]: I1125 10:41:46.379019 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:46 crc kubenswrapper[4854]: I1125 10:41:46.438286 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r7bnc"] Nov 25 10:41:46 crc kubenswrapper[4854]: I1125 10:41:46.595928 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-r7bnc" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="registry-server" containerID="cri-o://a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717" gracePeriod=2 Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.167255 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.260181 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-catalog-content\") pod \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.260346 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-utilities\") pod \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.260475 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbnr4\" (UniqueName: \"kubernetes.io/projected/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-kube-api-access-qbnr4\") pod \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\" (UID: \"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4\") " Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.261145 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-utilities" (OuterVolumeSpecName: "utilities") pod "f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" (UID: "f06228dd-a5d4-4b75-b1de-b6ffb638c5a4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.261759 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.268438 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-kube-api-access-qbnr4" (OuterVolumeSpecName: "kube-api-access-qbnr4") pod "f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" (UID: "f06228dd-a5d4-4b75-b1de-b6ffb638c5a4"). InnerVolumeSpecName "kube-api-access-qbnr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.331529 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" (UID: "f06228dd-a5d4-4b75-b1de-b6ffb638c5a4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.363552 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbnr4\" (UniqueName: \"kubernetes.io/projected/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-kube-api-access-qbnr4\") on node \"crc\" DevicePath \"\"" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.363596 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.605437 4854 generic.go:334] "Generic (PLEG): container finished" podID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerID="a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717" exitCode=0 Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.605480 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7bnc" event={"ID":"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4","Type":"ContainerDied","Data":"a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717"} Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.605760 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7bnc" event={"ID":"f06228dd-a5d4-4b75-b1de-b6ffb638c5a4","Type":"ContainerDied","Data":"305d170cb1d823d162554f6b50f94b9539f8887c2e338c8d0bab843a688747d9"} Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.605785 4854 scope.go:117] "RemoveContainer" containerID="a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.605507 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7bnc" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.623962 4854 scope.go:117] "RemoveContainer" containerID="3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.641127 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r7bnc"] Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.652477 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-r7bnc"] Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.657543 4854 scope.go:117] "RemoveContainer" containerID="f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.718994 4854 scope.go:117] "RemoveContainer" containerID="a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717" Nov 25 10:41:47 crc kubenswrapper[4854]: E1125 10:41:47.720187 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717\": container with ID starting with a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717 not found: ID does not exist" containerID="a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.720244 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717"} err="failed to get container status \"a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717\": rpc error: code = NotFound desc = could not find container \"a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717\": container with ID starting with a441b70ebad4c6cc74e1723cd69c8025b93e55480feb90ffb4bce716913e8717 not found: ID does not exist" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.720278 4854 scope.go:117] "RemoveContainer" containerID="3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03" Nov 25 10:41:47 crc kubenswrapper[4854]: E1125 10:41:47.720809 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03\": container with ID starting with 3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03 not found: ID does not exist" containerID="3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.720840 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03"} err="failed to get container status \"3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03\": rpc error: code = NotFound desc = could not find container \"3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03\": container with ID starting with 3ace26983e7d28524aa6201c2feddacf998747d67702e77bb9ce72a4034b9b03 not found: ID does not exist" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.720860 4854 scope.go:117] "RemoveContainer" containerID="f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99" Nov 25 10:41:47 crc kubenswrapper[4854]: E1125 10:41:47.721117 4854 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99\": container with ID starting with f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99 not found: ID does not exist" containerID="f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99" Nov 25 10:41:47 crc kubenswrapper[4854]: I1125 10:41:47.721143 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99"} err="failed to get container status \"f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99\": rpc error: code = NotFound desc = could not find container \"f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99\": container with ID starting with f45c8d7a7f8deef8cbf217c716bad50d2ed670b1dca69a462e8bd31b2b58dc99 not found: ID does not exist" Nov 25 10:41:49 crc kubenswrapper[4854]: I1125 10:41:49.046953 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" path="/var/lib/kubelet/pods/f06228dd-a5d4-4b75-b1de-b6ffb638c5a4/volumes" Nov 25 10:41:59 crc kubenswrapper[4854]: I1125 10:41:59.014468 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:41:59 crc kubenswrapper[4854]: E1125 10:41:59.015407 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:42:12 crc kubenswrapper[4854]: I1125 10:42:12.014245 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:42:12 crc kubenswrapper[4854]: E1125 10:42:12.015666 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:42:23 crc kubenswrapper[4854]: I1125 10:42:23.014653 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:42:23 crc kubenswrapper[4854]: E1125 10:42:23.015734 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:42:37 crc kubenswrapper[4854]: I1125 10:42:37.023293 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:42:47 crc kubenswrapper[4854]: I1125 10:42:47.359652 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"b762ab57006f226c5e2e937bd59d5ef221def4e09151901c27f3a4f89a5f39c1"} Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.815659 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fpmfz"] Nov 25 10:42:51 crc kubenswrapper[4854]: E1125 10:42:51.817628 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="registry-server" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.817733 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="registry-server" Nov 25 10:42:51 crc kubenswrapper[4854]: E1125 10:42:51.817837 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="extract-utilities" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.817927 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="extract-utilities" Nov 25 10:42:51 crc kubenswrapper[4854]: E1125 10:42:51.818026 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="extract-content" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.818083 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="extract-content" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.818402 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="f06228dd-a5d4-4b75-b1de-b6ffb638c5a4" containerName="registry-server" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.820816 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.835716 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpmfz"] Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.839929 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzqzm\" (UniqueName: \"kubernetes.io/projected/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-kube-api-access-gzqzm\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.840653 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-catalog-content\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.840870 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-utilities\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.942568 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-catalog-content\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.942991 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-utilities\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.943114 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzqzm\" (UniqueName: \"kubernetes.io/projected/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-kube-api-access-gzqzm\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.943529 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-catalog-content\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.944121 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-utilities\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:51 crc kubenswrapper[4854]: I1125 10:42:51.964086 4854 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gzqzm\" (UniqueName: \"kubernetes.io/projected/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-kube-api-access-gzqzm\") pod \"redhat-marketplace-fpmfz\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:52 crc kubenswrapper[4854]: I1125 10:42:52.188721 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:42:52 crc kubenswrapper[4854]: I1125 10:42:52.716894 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpmfz"] Nov 25 10:42:53 crc kubenswrapper[4854]: I1125 10:42:53.447541 4854 generic.go:334] "Generic (PLEG): container finished" podID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerID="a4b8c5716e3c03630ad9a79cd211e01d9af938c76a8e5ded4481258810adcb37" exitCode=0 Nov 25 10:42:53 crc kubenswrapper[4854]: I1125 10:42:53.447655 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpmfz" event={"ID":"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7","Type":"ContainerDied","Data":"a4b8c5716e3c03630ad9a79cd211e01d9af938c76a8e5ded4481258810adcb37"} Nov 25 10:42:53 crc kubenswrapper[4854]: I1125 10:42:53.449107 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpmfz" event={"ID":"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7","Type":"ContainerStarted","Data":"7436ab549e1acc0a77587fccc7ad493684c175220a34a5d9baf36da8437b5d25"} Nov 25 10:42:55 crc kubenswrapper[4854]: I1125 10:42:55.477255 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpmfz" event={"ID":"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7","Type":"ContainerStarted","Data":"6aa446a10aae1c85f3a92cc166717d28c5bd65aa3003afbcfd4fb192d5e45068"} Nov 25 10:42:56 crc kubenswrapper[4854]: I1125 10:42:56.494645 4854 generic.go:334] "Generic (PLEG): container finished" podID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerID="6aa446a10aae1c85f3a92cc166717d28c5bd65aa3003afbcfd4fb192d5e45068" exitCode=0 Nov 25 10:42:56 crc kubenswrapper[4854]: I1125 10:42:56.494730 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpmfz" event={"ID":"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7","Type":"ContainerDied","Data":"6aa446a10aae1c85f3a92cc166717d28c5bd65aa3003afbcfd4fb192d5e45068"} Nov 25 10:42:57 crc kubenswrapper[4854]: I1125 10:42:57.513643 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpmfz" event={"ID":"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7","Type":"ContainerStarted","Data":"ed0908734c19fd16e380e748b26df6f555721217b681837db7e4d2525401509b"} Nov 25 10:42:57 crc kubenswrapper[4854]: I1125 10:42:57.554255 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fpmfz" podStartSLOduration=3.077157782 podStartE2EDuration="6.554236349s" podCreationTimestamp="2025-11-25 10:42:51 +0000 UTC" firstStartedPulling="2025-11-25 10:42:53.449978335 +0000 UTC m=+3979.302971711" lastFinishedPulling="2025-11-25 10:42:56.927056902 +0000 UTC m=+3982.780050278" observedRunningTime="2025-11-25 10:42:57.535149556 +0000 UTC m=+3983.388142962" watchObservedRunningTime="2025-11-25 10:42:57.554236349 +0000 UTC m=+3983.407229725" Nov 25 10:43:02 crc kubenswrapper[4854]: I1125 10:43:02.189587 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:43:02 crc kubenswrapper[4854]: I1125 10:43:02.190110 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:43:02 crc kubenswrapper[4854]: I1125 10:43:02.259625 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:43:02 crc kubenswrapper[4854]: I1125 10:43:02.751937 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:43:02 crc kubenswrapper[4854]: I1125 10:43:02.887880 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpmfz"] Nov 25 10:43:04 crc kubenswrapper[4854]: I1125 10:43:04.634695 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fpmfz" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="registry-server" containerID="cri-o://ed0908734c19fd16e380e748b26df6f555721217b681837db7e4d2525401509b" gracePeriod=2 Nov 25 10:43:05 crc kubenswrapper[4854]: I1125 10:43:05.646265 4854 generic.go:334] "Generic (PLEG): container finished" podID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerID="ed0908734c19fd16e380e748b26df6f555721217b681837db7e4d2525401509b" exitCode=0 Nov 25 10:43:05 crc kubenswrapper[4854]: I1125 10:43:05.646322 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpmfz" event={"ID":"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7","Type":"ContainerDied","Data":"ed0908734c19fd16e380e748b26df6f555721217b681837db7e4d2525401509b"} Nov 25 10:43:05 crc kubenswrapper[4854]: I1125 10:43:05.907213 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.026309 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-catalog-content\") pod \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.026456 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-utilities\") pod \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.026563 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzqzm\" (UniqueName: \"kubernetes.io/projected/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-kube-api-access-gzqzm\") pod \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\" (UID: \"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7\") " Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.028072 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-utilities" (OuterVolumeSpecName: "utilities") pod "cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" (UID: "cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.034010 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-kube-api-access-gzqzm" (OuterVolumeSpecName: "kube-api-access-gzqzm") pod "cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" (UID: "cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7"). InnerVolumeSpecName "kube-api-access-gzqzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.053382 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" (UID: "cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.130177 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.130252 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.130273 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzqzm\" (UniqueName: \"kubernetes.io/projected/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7-kube-api-access-gzqzm\") on node \"crc\" DevicePath \"\"" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.671476 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpmfz" event={"ID":"cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7","Type":"ContainerDied","Data":"7436ab549e1acc0a77587fccc7ad493684c175220a34a5d9baf36da8437b5d25"} Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.671618 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpmfz" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.671855 4854 scope.go:117] "RemoveContainer" containerID="ed0908734c19fd16e380e748b26df6f555721217b681837db7e4d2525401509b" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.704964 4854 scope.go:117] "RemoveContainer" containerID="6aa446a10aae1c85f3a92cc166717d28c5bd65aa3003afbcfd4fb192d5e45068" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.733249 4854 scope.go:117] "RemoveContainer" containerID="a4b8c5716e3c03630ad9a79cd211e01d9af938c76a8e5ded4481258810adcb37" Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.764757 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpmfz"] Nov 25 10:43:06 crc kubenswrapper[4854]: I1125 10:43:06.775336 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpmfz"] Nov 25 10:43:07 crc kubenswrapper[4854]: I1125 10:43:07.025683 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" path="/var/lib/kubelet/pods/cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7/volumes" Nov 25 10:43:24 crc kubenswrapper[4854]: E1125 10:43:24.103157 4854 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.184:33386->38.102.83.184:43333: write tcp 38.102.83.184:33386->38.102.83.184:43333: write: broken pipe Nov 25 10:43:31 crc kubenswrapper[4854]: E1125 10:43:31.456441 4854 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.184:52786->38.102.83.184:43333: write tcp 38.102.83.184:52786->38.102.83.184:43333: write: broken pipe Nov 25 10:44:55 crc kubenswrapper[4854]: I1125 10:44:55.032537 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:44:55 crc kubenswrapper[4854]: I1125 10:44:55.033123 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.354613 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp"] Nov 25 10:45:00 crc kubenswrapper[4854]: E1125 10:45:00.361374 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="extract-content" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.361406 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="extract-content" Nov 25 10:45:00 crc kubenswrapper[4854]: E1125 10:45:00.361470 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.361481 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4854]: E1125 10:45:00.361539 4854 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="extract-utilities" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.361549 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="extract-utilities" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.361888 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="cde8a1f1-c93f-4c23-89f1-f5caee2a4ca7" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.363220 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.367369 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.367703 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.378090 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp"] Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.395546 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-config-volume\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.395761 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-secret-volume\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.395945 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxsx8\" (UniqueName: \"kubernetes.io/projected/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-kube-api-access-vxsx8\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.498350 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxsx8\" (UniqueName: \"kubernetes.io/projected/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-kube-api-access-vxsx8\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.499146 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-config-volume\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: 
I1125 10:45:00.499373 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-secret-volume\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.500214 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-config-volume\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.505640 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-secret-volume\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.521455 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxsx8\" (UniqueName: \"kubernetes.io/projected/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-kube-api-access-vxsx8\") pod \"collect-profiles-29401125-6hftp\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:00 crc kubenswrapper[4854]: I1125 10:45:00.697075 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:01 crc kubenswrapper[4854]: I1125 10:45:01.255423 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp"] Nov 25 10:45:01 crc kubenswrapper[4854]: W1125 10:45:01.256658 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b3ba2fb_2c4f_4ed6_a62f_abd9c752387a.slice/crio-02840707e07706a90ed67744ff739833629fd19e29d139d8f621f4c3297f14c9 WatchSource:0}: Error finding container 02840707e07706a90ed67744ff739833629fd19e29d139d8f621f4c3297f14c9: Status 404 returned error can't find the container with id 02840707e07706a90ed67744ff739833629fd19e29d139d8f621f4c3297f14c9 Nov 25 10:45:01 crc kubenswrapper[4854]: I1125 10:45:01.821231 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" event={"ID":"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a","Type":"ContainerStarted","Data":"4c50fd5b1ef7224513e87fdd8542a92637bec70acd5fb40fcf2ed26b9675dd04"} Nov 25 10:45:01 crc kubenswrapper[4854]: I1125 10:45:01.821780 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" event={"ID":"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a","Type":"ContainerStarted","Data":"02840707e07706a90ed67744ff739833629fd19e29d139d8f621f4c3297f14c9"} Nov 25 10:45:01 crc kubenswrapper[4854]: I1125 10:45:01.851344 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" podStartSLOduration=1.8513197529999998 podStartE2EDuration="1.851319753s" 
podCreationTimestamp="2025-11-25 10:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:45:01.842153142 +0000 UTC m=+4107.695146518" watchObservedRunningTime="2025-11-25 10:45:01.851319753 +0000 UTC m=+4107.704313139" Nov 25 10:45:02 crc kubenswrapper[4854]: I1125 10:45:02.837405 4854 generic.go:334] "Generic (PLEG): container finished" podID="2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" containerID="4c50fd5b1ef7224513e87fdd8542a92637bec70acd5fb40fcf2ed26b9675dd04" exitCode=0 Nov 25 10:45:02 crc kubenswrapper[4854]: I1125 10:45:02.837565 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" event={"ID":"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a","Type":"ContainerDied","Data":"4c50fd5b1ef7224513e87fdd8542a92637bec70acd5fb40fcf2ed26b9675dd04"} Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.384918 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.501652 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-secret-volume\") pod \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.501928 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxsx8\" (UniqueName: \"kubernetes.io/projected/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-kube-api-access-vxsx8\") pod \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.502024 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-config-volume\") pod \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\" (UID: \"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a\") " Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.502613 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-config-volume" (OuterVolumeSpecName: "config-volume") pod "2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" (UID: "2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.511842 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-kube-api-access-vxsx8" (OuterVolumeSpecName: "kube-api-access-vxsx8") pod "2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" (UID: "2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a"). InnerVolumeSpecName "kube-api-access-vxsx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.511851 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" (UID: "2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.605314 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxsx8\" (UniqueName: \"kubernetes.io/projected/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-kube-api-access-vxsx8\") on node \"crc\" DevicePath \"\"" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.605365 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.605387 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.892295 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" event={"ID":"2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a","Type":"ContainerDied","Data":"02840707e07706a90ed67744ff739833629fd19e29d139d8f621f4c3297f14c9"} Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.892631 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02840707e07706a90ed67744ff739833629fd19e29d139d8f621f4c3297f14c9" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.892324 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp" Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.964046 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj"] Nov 25 10:45:04 crc kubenswrapper[4854]: I1125 10:45:04.976167 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-wnzgj"] Nov 25 10:45:05 crc kubenswrapper[4854]: I1125 10:45:05.034398 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d61e667f-63aa-47e8-b22e-4a515dc5d81d" path="/var/lib/kubelet/pods/d61e667f-63aa-47e8-b22e-4a515dc5d81d/volumes" Nov 25 10:45:25 crc kubenswrapper[4854]: I1125 10:45:25.030644 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:45:25 crc kubenswrapper[4854]: I1125 10:45:25.031342 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.032953 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.033592 4854 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.043533 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.044721 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b762ab57006f226c5e2e937bd59d5ef221def4e09151901c27f3a4f89a5f39c1"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.044840 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://b762ab57006f226c5e2e937bd59d5ef221def4e09151901c27f3a4f89a5f39c1" gracePeriod=600 Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.578278 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="b762ab57006f226c5e2e937bd59d5ef221def4e09151901c27f3a4f89a5f39c1" exitCode=0 Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.578446 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"b762ab57006f226c5e2e937bd59d5ef221def4e09151901c27f3a4f89a5f39c1"} Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.578704 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"} Nov 25 10:45:55 crc kubenswrapper[4854]: I1125 10:45:55.578738 4854 scope.go:117] "RemoveContainer" containerID="edb84fd7a59ba5186401d63de1bc42d8be0434a868fd0d53853093404fcb425e" Nov 25 10:45:57 crc kubenswrapper[4854]: I1125 10:45:57.238613 4854 scope.go:117] "RemoveContainer" containerID="3a0671fde0f7292553d1434d0381b224c344f383e5cd575a0444463cc7cbf14c" Nov 25 10:47:44 crc kubenswrapper[4854]: E1125 10:47:44.767050 4854 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.184:56658->38.102.83.184:43333: write tcp 38.102.83.184:56658->38.102.83.184:43333: write: broken pipe Nov 25 10:47:55 crc kubenswrapper[4854]: I1125 10:47:55.038052 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:47:55 crc kubenswrapper[4854]: I1125 10:47:55.039042 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:48:16 crc kubenswrapper[4854]: I1125 10:48:16.592021 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-pxbm7" podUID="4b64fc1e-475a-4d69-a7ac-a23a7b5a7909" containerName="hostpath-provisioner" probeResult="failure" output="Get \"http://10.217.0.41:9898/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:48:16 crc kubenswrapper[4854]: I1125 10:48:16.775185 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e73606dc-c7c0-4d1e-9f87-5effe3a03611" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 25 10:48:21 crc kubenswrapper[4854]: I1125 10:48:21.776299 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="e73606dc-c7c0-4d1e-9f87-5effe3a03611" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 25 10:48:25 crc kubenswrapper[4854]: I1125 10:48:25.028434 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:48:25 crc kubenswrapper[4854]: I1125 10:48:25.028952 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.703646 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-d4wkr"] Nov 25 10:48:27 crc kubenswrapper[4854]: E1125 10:48:27.706997 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" containerName="collect-profiles" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.707136 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" containerName="collect-profiles" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.707534 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" containerName="collect-profiles" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.710392 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.733552 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d4wkr"] Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.835739 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-utilities\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.836136 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8mwh\" (UniqueName: \"kubernetes.io/projected/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-kube-api-access-t8mwh\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.836507 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-catalog-content\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.938186 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-utilities\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.938232 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8mwh\" (UniqueName: \"kubernetes.io/projected/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-kube-api-access-t8mwh\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.938386 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-catalog-content\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.938740 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-utilities\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.938804 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-catalog-content\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:27 crc kubenswrapper[4854]: I1125 10:48:27.965620 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-t8mwh\" (UniqueName: \"kubernetes.io/projected/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-kube-api-access-t8mwh\") pod \"certified-operators-d4wkr\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:28 crc kubenswrapper[4854]: I1125 10:48:28.050172 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:28 crc kubenswrapper[4854]: I1125 10:48:28.659013 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d4wkr"] Nov 25 10:48:28 crc kubenswrapper[4854]: I1125 10:48:28.717575 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4wkr" event={"ID":"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03","Type":"ContainerStarted","Data":"d739c267ce6ea5f373a848a56ff2e0fac308546a13cd121fe071ec5eb3d80fa8"} Nov 25 10:48:29 crc kubenswrapper[4854]: I1125 10:48:29.733299 4854 generic.go:334] "Generic (PLEG): container finished" podID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerID="38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc" exitCode=0 Nov 25 10:48:29 crc kubenswrapper[4854]: I1125 10:48:29.733415 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4wkr" event={"ID":"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03","Type":"ContainerDied","Data":"38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc"} Nov 25 10:48:29 crc kubenswrapper[4854]: I1125 10:48:29.736311 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:48:31 crc kubenswrapper[4854]: I1125 10:48:31.762956 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4wkr" event={"ID":"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03","Type":"ContainerStarted","Data":"6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36"} Nov 25 10:48:35 crc kubenswrapper[4854]: I1125 10:48:35.823299 4854 generic.go:334] "Generic (PLEG): container finished" podID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerID="6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36" exitCode=0 Nov 25 10:48:35 crc kubenswrapper[4854]: I1125 10:48:35.823374 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4wkr" event={"ID":"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03","Type":"ContainerDied","Data":"6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36"} Nov 25 10:48:36 crc kubenswrapper[4854]: E1125 10:48:36.228724 4854 log.go:32] "ImageFsInfo from image service failed" err="rpc error: code = Unknown desc = get image fs info unable to get usage for /var/lib/containers/storage/overlay-images: get disk usage for path /var/lib/containers/storage/overlay-images: lstat /var/lib/containers/storage/overlay-images/.tmp-images.json3995554604: no such file or directory" Nov 25 10:48:36 crc kubenswrapper[4854]: E1125 10:48:36.230335 4854 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get imageFs stats: missing image stats: nil" Nov 25 10:48:37 crc kubenswrapper[4854]: I1125 10:48:37.849589 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4wkr" 
event={"ID":"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03","Type":"ContainerStarted","Data":"75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0"} Nov 25 10:48:37 crc kubenswrapper[4854]: I1125 10:48:37.882731 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-d4wkr" podStartSLOduration=4.384669607 podStartE2EDuration="10.88270535s" podCreationTimestamp="2025-11-25 10:48:27 +0000 UTC" firstStartedPulling="2025-11-25 10:48:29.735956593 +0000 UTC m=+4315.588949979" lastFinishedPulling="2025-11-25 10:48:36.233992346 +0000 UTC m=+4322.086985722" observedRunningTime="2025-11-25 10:48:37.874244459 +0000 UTC m=+4323.727237865" watchObservedRunningTime="2025-11-25 10:48:37.88270535 +0000 UTC m=+4323.735698746" Nov 25 10:48:38 crc kubenswrapper[4854]: I1125 10:48:38.050622 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:38 crc kubenswrapper[4854]: I1125 10:48:38.050720 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:39 crc kubenswrapper[4854]: I1125 10:48:39.133755 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-d4wkr" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="registry-server" probeResult="failure" output=< Nov 25 10:48:39 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 10:48:39 crc kubenswrapper[4854]: > Nov 25 10:48:48 crc kubenswrapper[4854]: I1125 10:48:48.951015 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:49 crc kubenswrapper[4854]: I1125 10:48:49.026083 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:49 crc kubenswrapper[4854]: I1125 10:48:49.195490 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d4wkr"] Nov 25 10:48:49 crc kubenswrapper[4854]: I1125 10:48:49.978759 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-d4wkr" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="registry-server" containerID="cri-o://75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0" gracePeriod=2 Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.592506 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.715825 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8mwh\" (UniqueName: \"kubernetes.io/projected/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-kube-api-access-t8mwh\") pod \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.716000 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-utilities\") pod \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.716372 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-catalog-content\") pod \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\" (UID: \"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03\") " Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.716943 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-utilities" (OuterVolumeSpecName: "utilities") pod "362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" (UID: "362fce8b-6b6c-4c7f-8ca1-3de4609dcf03"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.717247 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.729971 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-kube-api-access-t8mwh" (OuterVolumeSpecName: "kube-api-access-t8mwh") pod "362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" (UID: "362fce8b-6b6c-4c7f-8ca1-3de4609dcf03"). InnerVolumeSpecName "kube-api-access-t8mwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.775314 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" (UID: "362fce8b-6b6c-4c7f-8ca1-3de4609dcf03"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.820426 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.820479 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8mwh\" (UniqueName: \"kubernetes.io/projected/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03-kube-api-access-t8mwh\") on node \"crc\" DevicePath \"\"" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.993493 4854 generic.go:334] "Generic (PLEG): container finished" podID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerID="75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0" exitCode=0 Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.993591 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4wkr" event={"ID":"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03","Type":"ContainerDied","Data":"75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0"} Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.994050 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d4wkr" event={"ID":"362fce8b-6b6c-4c7f-8ca1-3de4609dcf03","Type":"ContainerDied","Data":"d739c267ce6ea5f373a848a56ff2e0fac308546a13cd121fe071ec5eb3d80fa8"} Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.993651 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d4wkr" Nov 25 10:48:50 crc kubenswrapper[4854]: I1125 10:48:50.994091 4854 scope.go:117] "RemoveContainer" containerID="75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.051114 4854 scope.go:117] "RemoveContainer" containerID="6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.054106 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d4wkr"] Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.064376 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-d4wkr"] Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.085713 4854 scope.go:117] "RemoveContainer" containerID="38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.149399 4854 scope.go:117] "RemoveContainer" containerID="75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0" Nov 25 10:48:51 crc kubenswrapper[4854]: E1125 10:48:51.150169 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0\": container with ID starting with 75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0 not found: ID does not exist" containerID="75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.150356 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0"} err="failed to get container status 
\"75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0\": rpc error: code = NotFound desc = could not find container \"75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0\": container with ID starting with 75f0e119131e0157fdb385f8b0f9ef82c6e3bd551dfec62f661544334af169f0 not found: ID does not exist" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.150481 4854 scope.go:117] "RemoveContainer" containerID="6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36" Nov 25 10:48:51 crc kubenswrapper[4854]: E1125 10:48:51.151302 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36\": container with ID starting with 6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36 not found: ID does not exist" containerID="6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.151454 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36"} err="failed to get container status \"6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36\": rpc error: code = NotFound desc = could not find container \"6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36\": container with ID starting with 6acd69bc1da2828d04684a8ce3c433615aec7e93d99869a512cc2815f85eed36 not found: ID does not exist" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.151568 4854 scope.go:117] "RemoveContainer" containerID="38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc" Nov 25 10:48:51 crc kubenswrapper[4854]: E1125 10:48:51.152073 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc\": container with ID starting with 38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc not found: ID does not exist" containerID="38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc" Nov 25 10:48:51 crc kubenswrapper[4854]: I1125 10:48:51.152114 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc"} err="failed to get container status \"38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc\": rpc error: code = NotFound desc = could not find container \"38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc\": container with ID starting with 38d29644397afa06f8f13ddcc0a2302771874ffa9f6a15adab6f3569b8f563bc not found: ID does not exist" Nov 25 10:48:53 crc kubenswrapper[4854]: I1125 10:48:53.025741 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" path="/var/lib/kubelet/pods/362fce8b-6b6c-4c7f-8ca1-3de4609dcf03/volumes" Nov 25 10:48:55 crc kubenswrapper[4854]: I1125 10:48:55.028439 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:48:55 crc kubenswrapper[4854]: I1125 10:48:55.029058 4854 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:48:55 crc kubenswrapper[4854]: I1125 10:48:55.032005 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:48:55 crc kubenswrapper[4854]: I1125 10:48:55.033188 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:48:55 crc kubenswrapper[4854]: I1125 10:48:55.033332 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" gracePeriod=600 Nov 25 10:48:55 crc kubenswrapper[4854]: E1125 10:48:55.164103 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:48:56 crc kubenswrapper[4854]: I1125 10:48:56.065696 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" exitCode=0 Nov 25 10:48:56 crc kubenswrapper[4854]: I1125 10:48:56.065762 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"} Nov 25 10:48:56 crc kubenswrapper[4854]: I1125 10:48:56.065808 4854 scope.go:117] "RemoveContainer" containerID="b762ab57006f226c5e2e937bd59d5ef221def4e09151901c27f3a4f89a5f39c1" Nov 25 10:48:56 crc kubenswrapper[4854]: I1125 10:48:56.067061 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:48:56 crc kubenswrapper[4854]: E1125 10:48:56.067792 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:49:07 crc kubenswrapper[4854]: I1125 10:49:07.014596 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:49:07 crc kubenswrapper[4854]: E1125 10:49:07.015828 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:49:22 crc kubenswrapper[4854]: I1125 10:49:22.013912 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:49:22 crc kubenswrapper[4854]: E1125 10:49:22.014938 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:49:35 crc kubenswrapper[4854]: I1125 10:49:35.028665 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:49:35 crc kubenswrapper[4854]: E1125 10:49:35.029734 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:49:46 crc kubenswrapper[4854]: I1125 10:49:46.016000 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:49:46 crc kubenswrapper[4854]: E1125 10:49:46.017025 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:49:59 crc kubenswrapper[4854]: I1125 10:49:59.032022 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:49:59 crc kubenswrapper[4854]: E1125 10:49:59.033460 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:50:09 crc kubenswrapper[4854]: E1125 10:50:09.283170 4854 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.184:55510->38.102.83.184:43333: read tcp 38.102.83.184:55510->38.102.83.184:43333: read: connection reset by peer Nov 25 10:50:14 crc kubenswrapper[4854]: I1125 10:50:14.014700 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:50:14 crc kubenswrapper[4854]: E1125 
10:50:14.015796 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.640659 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xhz74"] Nov 25 10:50:23 crc kubenswrapper[4854]: E1125 10:50:23.641734 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="extract-content" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.641750 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="extract-content" Nov 25 10:50:23 crc kubenswrapper[4854]: E1125 10:50:23.641781 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="extract-utilities" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.641787 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="extract-utilities" Nov 25 10:50:23 crc kubenswrapper[4854]: E1125 10:50:23.641799 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="registry-server" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.641806 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="registry-server" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.642093 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="362fce8b-6b6c-4c7f-8ca1-3de4609dcf03" containerName="registry-server" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.644104 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.662753 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xhz74"] Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.764992 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpzvc\" (UniqueName: \"kubernetes.io/projected/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-kube-api-access-zpzvc\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.765148 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-catalog-content\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.765602 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-utilities\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.868326 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpzvc\" (UniqueName: \"kubernetes.io/projected/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-kube-api-access-zpzvc\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.868386 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-catalog-content\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.868520 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-utilities\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.869061 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-utilities\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.869056 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-catalog-content\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:23 crc kubenswrapper[4854]: I1125 10:50:23.890627 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zpzvc\" (UniqueName: \"kubernetes.io/projected/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-kube-api-access-zpzvc\") pod \"redhat-operators-xhz74\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") " pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:24 crc kubenswrapper[4854]: I1125 10:50:24.004057 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:24 crc kubenswrapper[4854]: I1125 10:50:24.610910 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xhz74"] Nov 25 10:50:26 crc kubenswrapper[4854]: I1125 10:50:26.013804 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:50:26 crc kubenswrapper[4854]: E1125 10:50:26.014818 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:50:26 crc kubenswrapper[4854]: I1125 10:50:26.288761 4854 generic.go:334] "Generic (PLEG): container finished" podID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerID="58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1" exitCode=0 Nov 25 10:50:26 crc kubenswrapper[4854]: I1125 10:50:26.288820 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xhz74" event={"ID":"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e","Type":"ContainerDied","Data":"58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1"} Nov 25 10:50:26 crc kubenswrapper[4854]: I1125 10:50:26.288852 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xhz74" event={"ID":"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e","Type":"ContainerStarted","Data":"ec672a6d6cfccf004ffa8b10444409969dd1000b37b8fde333d4ea8fdff78bea"} Nov 25 10:50:31 crc kubenswrapper[4854]: I1125 10:50:31.357463 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xhz74" event={"ID":"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e","Type":"ContainerStarted","Data":"5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917"} Nov 25 10:50:37 crc kubenswrapper[4854]: I1125 10:50:37.014198 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:50:37 crc kubenswrapper[4854]: E1125 10:50:37.015429 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:50:47 crc kubenswrapper[4854]: I1125 10:50:47.582038 4854 generic.go:334] "Generic (PLEG): container finished" podID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerID="5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917" exitCode=0 Nov 25 10:50:47 crc kubenswrapper[4854]: I1125 10:50:47.582175 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-xhz74" event={"ID":"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e","Type":"ContainerDied","Data":"5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917"} Nov 25 10:50:48 crc kubenswrapper[4854]: I1125 10:50:48.602106 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xhz74" event={"ID":"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e","Type":"ContainerStarted","Data":"3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6"} Nov 25 10:50:48 crc kubenswrapper[4854]: I1125 10:50:48.644554 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xhz74" podStartSLOduration=3.75532104 podStartE2EDuration="25.644526452s" podCreationTimestamp="2025-11-25 10:50:23 +0000 UTC" firstStartedPulling="2025-11-25 10:50:26.292278879 +0000 UTC m=+4432.145272265" lastFinishedPulling="2025-11-25 10:50:48.181484301 +0000 UTC m=+4454.034477677" observedRunningTime="2025-11-25 10:50:48.630082487 +0000 UTC m=+4454.483075933" watchObservedRunningTime="2025-11-25 10:50:48.644526452 +0000 UTC m=+4454.497519838" Nov 25 10:50:52 crc kubenswrapper[4854]: I1125 10:50:52.013725 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:50:52 crc kubenswrapper[4854]: E1125 10:50:52.014556 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 10:50:54 crc kubenswrapper[4854]: I1125 10:50:54.005874 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:54 crc kubenswrapper[4854]: I1125 10:50:54.006599 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xhz74" Nov 25 10:50:55 crc kubenswrapper[4854]: I1125 10:50:55.077257 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xhz74" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="registry-server" probeResult="failure" output=< Nov 25 10:50:55 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 10:50:55 crc kubenswrapper[4854]: > Nov 25 10:51:05 crc kubenswrapper[4854]: I1125 10:51:05.061337 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xhz74" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="registry-server" probeResult="failure" output=< Nov 25 10:51:05 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 10:51:05 crc kubenswrapper[4854]: > Nov 25 10:51:06 crc kubenswrapper[4854]: I1125 10:51:06.015513 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:51:06 crc kubenswrapper[4854]: E1125 10:51:06.016235 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:51:15 crc kubenswrapper[4854]: I1125 10:51:15.065267 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xhz74" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="registry-server" probeResult="failure" output=<
Nov 25 10:51:15 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 10:51:15 crc kubenswrapper[4854]: >
Nov 25 10:51:18 crc kubenswrapper[4854]: I1125 10:51:18.014968 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:51:18 crc kubenswrapper[4854]: E1125 10:51:18.015997 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:51:24 crc kubenswrapper[4854]: I1125 10:51:24.058872 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xhz74"
Nov 25 10:51:24 crc kubenswrapper[4854]: I1125 10:51:24.122710 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xhz74"
Nov 25 10:51:24 crc kubenswrapper[4854]: I1125 10:51:24.855029 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xhz74"]
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.072851 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xhz74" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="registry-server" containerID="cri-o://3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6" gracePeriod=2
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.733045 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xhz74"
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.823343 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-utilities\") pod \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") "
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.823952 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpzvc\" (UniqueName: \"kubernetes.io/projected/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-kube-api-access-zpzvc\") pod \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") "
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.824041 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-catalog-content\") pod \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\" (UID: \"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e\") "
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.824152 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-utilities" (OuterVolumeSpecName: "utilities") pod "ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" (UID: "ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.824863 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.833086 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-kube-api-access-zpzvc" (OuterVolumeSpecName: "kube-api-access-zpzvc") pod "ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" (UID: "ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e"). InnerVolumeSpecName "kube-api-access-zpzvc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.927131 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpzvc\" (UniqueName: \"kubernetes.io/projected/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-kube-api-access-zpzvc\") on node \"crc\" DevicePath \"\""
Nov 25 10:51:26 crc kubenswrapper[4854]: I1125 10:51:26.939123 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" (UID: "ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.029395 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.084178 4854 generic.go:334] "Generic (PLEG): container finished" podID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerID="3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6" exitCode=0
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.084220 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xhz74" event={"ID":"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e","Type":"ContainerDied","Data":"3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6"}
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.084246 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xhz74" event={"ID":"ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e","Type":"ContainerDied","Data":"ec672a6d6cfccf004ffa8b10444409969dd1000b37b8fde333d4ea8fdff78bea"}
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.084261 4854 scope.go:117] "RemoveContainer" containerID="3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.084379 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xhz74"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.110929 4854 scope.go:117] "RemoveContainer" containerID="5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.124893 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xhz74"]
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.135945 4854 scope.go:117] "RemoveContainer" containerID="58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.137891 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xhz74"]
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.197173 4854 scope.go:117] "RemoveContainer" containerID="3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6"
Nov 25 10:51:27 crc kubenswrapper[4854]: E1125 10:51:27.197641 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6\": container with ID starting with 3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6 not found: ID does not exist" containerID="3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.197687 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6"} err="failed to get container status \"3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6\": rpc error: code = NotFound desc = could not find container \"3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6\": container with ID starting with 3b7e9dce37f64ee763b726426860b7c7056f5b7a7e6e1583ba19405c7120dfa6 not found: ID does not exist"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.197715 4854 scope.go:117] "RemoveContainer" containerID="5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917"
Nov 25 10:51:27 crc kubenswrapper[4854]: E1125 10:51:27.198417 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917\": container with ID starting with 5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917 not found: ID does not exist" containerID="5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.198459 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917"} err="failed to get container status \"5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917\": rpc error: code = NotFound desc = could not find container \"5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917\": container with ID starting with 5b95aec3e7fe603e9ed1eeadf7627816557082dd2b761f38924355d3601c9917 not found: ID does not exist"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.198485 4854 scope.go:117] "RemoveContainer" containerID="58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1"
Nov 25 10:51:27 crc kubenswrapper[4854]: E1125 10:51:27.198853 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1\": container with ID starting with 58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1 not found: ID does not exist" containerID="58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1"
Nov 25 10:51:27 crc kubenswrapper[4854]: I1125 10:51:27.198880 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1"} err="failed to get container status \"58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1\": rpc error: code = NotFound desc = could not find container \"58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1\": container with ID starting with 58cd5a3cb1e7fef47bebbcefdcd12da46d76ec091c0e6e9e8bcd1025976ff9b1 not found: ID does not exist"
Nov 25 10:51:29 crc kubenswrapper[4854]: I1125 10:51:29.029166 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" path="/var/lib/kubelet/pods/ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e/volumes"
Nov 25 10:51:31 crc kubenswrapper[4854]: I1125 10:51:31.020725 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:51:31 crc kubenswrapper[4854]: E1125 10:51:31.021658 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:51:44 crc kubenswrapper[4854]: I1125 10:51:44.013911 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:51:44 crc kubenswrapper[4854]: E1125 10:51:44.014734 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:51:57 crc kubenswrapper[4854]: I1125 10:51:57.013817 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:51:57 crc kubenswrapper[4854]: E1125 10:51:57.014650 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:52:12 crc kubenswrapper[4854]: I1125 10:52:12.013945 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:52:12 crc kubenswrapper[4854]: E1125 10:52:12.014552 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:52:26 crc kubenswrapper[4854]: I1125 10:52:26.013854 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:52:26 crc kubenswrapper[4854]: E1125 10:52:26.014986 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:52:31 crc kubenswrapper[4854]: E1125 10:52:31.843872 4854 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.184:51004->38.102.83.184:43333: write tcp 38.102.83.184:51004->38.102.83.184:43333: write: broken pipe
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.825362 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vm788"]
Nov 25 10:52:35 crc kubenswrapper[4854]: E1125 10:52:35.829132 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="registry-server"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.829282 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="registry-server"
Nov 25 10:52:35 crc kubenswrapper[4854]: E1125 10:52:35.829412 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="extract-content"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.829493 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="extract-content"
Nov 25 10:52:35 crc kubenswrapper[4854]: E1125 10:52:35.829705 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="extract-utilities"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.829823 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="extract-utilities"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.830307 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec22557b-b2d6-4e64-ba8f-70c6abeb9f5e" containerName="registry-server"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.833762 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.871925 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vm788"]
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.967899 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt6ld\" (UniqueName: \"kubernetes.io/projected/e226bcf8-7fc7-4704-8573-1c05e3313bde-kube-api-access-gt6ld\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.967962 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-catalog-content\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:35 crc kubenswrapper[4854]: I1125 10:52:35.967982 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-utilities\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:36 crc kubenswrapper[4854]: I1125 10:52:36.070393 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt6ld\" (UniqueName: \"kubernetes.io/projected/e226bcf8-7fc7-4704-8573-1c05e3313bde-kube-api-access-gt6ld\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:36 crc kubenswrapper[4854]: I1125 10:52:36.070465 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-catalog-content\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:36 crc kubenswrapper[4854]: I1125 10:52:36.070495 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-utilities\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:36 crc kubenswrapper[4854]: I1125 10:52:36.071480 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-catalog-content\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:36 crc kubenswrapper[4854]: I1125 10:52:36.071908 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-utilities\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:36 crc kubenswrapper[4854]: I1125 10:52:36.174556 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt6ld\" (UniqueName: \"kubernetes.io/projected/e226bcf8-7fc7-4704-8573-1c05e3313bde-kube-api-access-gt6ld\") pod \"community-operators-vm788\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") " pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:36 crc kubenswrapper[4854]: I1125 10:52:36.468647 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:37 crc kubenswrapper[4854]: I1125 10:52:37.038337 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vm788"]
Nov 25 10:52:38 crc kubenswrapper[4854]: I1125 10:52:38.007451 4854 generic.go:334] "Generic (PLEG): container finished" podID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerID="6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3" exitCode=0
Nov 25 10:52:38 crc kubenswrapper[4854]: I1125 10:52:38.007552 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vm788" event={"ID":"e226bcf8-7fc7-4704-8573-1c05e3313bde","Type":"ContainerDied","Data":"6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3"}
Nov 25 10:52:38 crc kubenswrapper[4854]: I1125 10:52:38.008019 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vm788" event={"ID":"e226bcf8-7fc7-4704-8573-1c05e3313bde","Type":"ContainerStarted","Data":"14228e629303187852d44e927819f2822eb428459c4eface9c5548ec58db60a0"}
Nov 25 10:52:40 crc kubenswrapper[4854]: I1125 10:52:40.034379 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vm788" event={"ID":"e226bcf8-7fc7-4704-8573-1c05e3313bde","Type":"ContainerStarted","Data":"3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a"}
Nov 25 10:52:41 crc kubenswrapper[4854]: I1125 10:52:41.035924 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:52:41 crc kubenswrapper[4854]: E1125 10:52:41.036637 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:52:43 crc kubenswrapper[4854]: I1125 10:52:43.075663 4854 generic.go:334] "Generic (PLEG): container finished" podID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerID="3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a" exitCode=0
Nov 25 10:52:43 crc kubenswrapper[4854]: I1125 10:52:43.075797 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vm788" event={"ID":"e226bcf8-7fc7-4704-8573-1c05e3313bde","Type":"ContainerDied","Data":"3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a"}
Nov 25 10:52:44 crc kubenswrapper[4854]: I1125 10:52:44.091536 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vm788" event={"ID":"e226bcf8-7fc7-4704-8573-1c05e3313bde","Type":"ContainerStarted","Data":"7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32"}
Nov 25 10:52:44 crc kubenswrapper[4854]: I1125 10:52:44.115902 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vm788" podStartSLOduration=3.478717482 podStartE2EDuration="9.115886253s" podCreationTimestamp="2025-11-25 10:52:35 +0000 UTC" firstStartedPulling="2025-11-25 10:52:38.009892361 +0000 UTC m=+4563.862885737" lastFinishedPulling="2025-11-25 10:52:43.647061132 +0000 UTC m=+4569.500054508" observedRunningTime="2025-11-25 10:52:44.111724049 +0000 UTC m=+4569.964717425" watchObservedRunningTime="2025-11-25 10:52:44.115886253 +0000 UTC m=+4569.968879629"
Nov 25 10:52:46 crc kubenswrapper[4854]: I1125 10:52:46.469151 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:46 crc kubenswrapper[4854]: I1125 10:52:46.470615 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:46 crc kubenswrapper[4854]: I1125 10:52:46.547205 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:48 crc kubenswrapper[4854]: I1125 10:52:48.188109 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:48 crc kubenswrapper[4854]: I1125 10:52:48.266223 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vm788"]
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.146710 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vm788" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="registry-server" containerID="cri-o://7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32" gracePeriod=2
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.712321 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.809781 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt6ld\" (UniqueName: \"kubernetes.io/projected/e226bcf8-7fc7-4704-8573-1c05e3313bde-kube-api-access-gt6ld\") pod \"e226bcf8-7fc7-4704-8573-1c05e3313bde\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") "
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.809882 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-catalog-content\") pod \"e226bcf8-7fc7-4704-8573-1c05e3313bde\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") "
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.809931 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-utilities\") pod \"e226bcf8-7fc7-4704-8573-1c05e3313bde\" (UID: \"e226bcf8-7fc7-4704-8573-1c05e3313bde\") "
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.811640 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-utilities" (OuterVolumeSpecName: "utilities") pod "e226bcf8-7fc7-4704-8573-1c05e3313bde" (UID: "e226bcf8-7fc7-4704-8573-1c05e3313bde"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.817903 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e226bcf8-7fc7-4704-8573-1c05e3313bde-kube-api-access-gt6ld" (OuterVolumeSpecName: "kube-api-access-gt6ld") pod "e226bcf8-7fc7-4704-8573-1c05e3313bde" (UID: "e226bcf8-7fc7-4704-8573-1c05e3313bde"). InnerVolumeSpecName "kube-api-access-gt6ld". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.927295 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.927640 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt6ld\" (UniqueName: \"kubernetes.io/projected/e226bcf8-7fc7-4704-8573-1c05e3313bde-kube-api-access-gt6ld\") on node \"crc\" DevicePath \"\""
Nov 25 10:52:50 crc kubenswrapper[4854]: I1125 10:52:50.955259 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e226bcf8-7fc7-4704-8573-1c05e3313bde" (UID: "e226bcf8-7fc7-4704-8573-1c05e3313bde"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.030391 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e226bcf8-7fc7-4704-8573-1c05e3313bde-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.160921 4854 generic.go:334] "Generic (PLEG): container finished" podID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerID="7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32" exitCode=0
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.160983 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vm788" event={"ID":"e226bcf8-7fc7-4704-8573-1c05e3313bde","Type":"ContainerDied","Data":"7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32"}
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.161014 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vm788" event={"ID":"e226bcf8-7fc7-4704-8573-1c05e3313bde","Type":"ContainerDied","Data":"14228e629303187852d44e927819f2822eb428459c4eface9c5548ec58db60a0"}
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.161033 4854 scope.go:117] "RemoveContainer" containerID="7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.161176 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vm788"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.188785 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vm788"]
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.191871 4854 scope.go:117] "RemoveContainer" containerID="3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.206291 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vm788"]
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.216657 4854 scope.go:117] "RemoveContainer" containerID="6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.280915 4854 scope.go:117] "RemoveContainer" containerID="7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32"
Nov 25 10:52:51 crc kubenswrapper[4854]: E1125 10:52:51.281355 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32\": container with ID starting with 7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32 not found: ID does not exist" containerID="7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.281388 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32"} err="failed to get container status \"7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32\": rpc error: code = NotFound desc = could not find container \"7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32\": container with ID starting with 7dbdaa84e7adf786f3a7105f0d1d062f065b1ebea5feafaf7a59621283443f32 not found: ID does not exist"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.281415 4854 scope.go:117] "RemoveContainer" containerID="3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a"
Nov 25 10:52:51 crc kubenswrapper[4854]: E1125 10:52:51.281756 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a\": container with ID starting with 3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a not found: ID does not exist" containerID="3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.281812 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a"} err="failed to get container status \"3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a\": rpc error: code = NotFound desc = could not find container \"3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a\": container with ID starting with 3649a60162661b10cd43579c03c1ea1e24b7b2d4fbe6bbd94cc3f8849a8fb89a not found: ID does not exist"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.281851 4854 scope.go:117] "RemoveContainer" containerID="6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3"
Nov 25 10:52:51 crc kubenswrapper[4854]: E1125 10:52:51.282204 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3\": container with ID starting with 6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3 not found: ID does not exist" containerID="6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3"
Nov 25 10:52:51 crc kubenswrapper[4854]: I1125 10:52:51.282240 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3"} err="failed to get container status \"6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3\": rpc error: code = NotFound desc = could not find container \"6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3\": container with ID starting with 6e9994d6a568c0e59dd25ea240bf79cd2047ada4a7d4cf56b51bbc8299cc93b3 not found: ID does not exist"
Nov 25 10:52:53 crc kubenswrapper[4854]: I1125 10:52:53.014346 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:52:53 crc kubenswrapper[4854]: E1125 10:52:53.015182 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:52:53 crc kubenswrapper[4854]: I1125 10:52:53.027949 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" path="/var/lib/kubelet/pods/e226bcf8-7fc7-4704-8573-1c05e3313bde/volumes"
Nov 25 10:53:04 crc kubenswrapper[4854]: I1125 10:53:04.014411 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:53:04 crc kubenswrapper[4854]: E1125 10:53:04.015130 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.686763 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ncpcb"]
Nov 25 10:53:12 crc kubenswrapper[4854]: E1125 10:53:12.687847 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="extract-content"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.687863 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="extract-content"
Nov 25 10:53:12 crc kubenswrapper[4854]: E1125 10:53:12.687892 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="registry-server"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.687899 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="registry-server"
Nov 25 10:53:12 crc kubenswrapper[4854]: E1125 10:53:12.687941 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="extract-utilities"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.687950 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="extract-utilities"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.688251 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e226bcf8-7fc7-4704-8573-1c05e3313bde" containerName="registry-server"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.690357 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.710966 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ncpcb"]
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.829140 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-catalog-content\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.829287 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtz96\" (UniqueName: \"kubernetes.io/projected/d245ca87-719e-4b15-9980-d50359199c5b-kube-api-access-vtz96\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.829340 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-utilities\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.930954 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-catalog-content\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.931090 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtz96\" (UniqueName: \"kubernetes.io/projected/d245ca87-719e-4b15-9980-d50359199c5b-kube-api-access-vtz96\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.931135 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-utilities\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.931701 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-catalog-content\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.931823 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-utilities\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:12 crc kubenswrapper[4854]: I1125 10:53:12.962980 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtz96\" (UniqueName: \"kubernetes.io/projected/d245ca87-719e-4b15-9980-d50359199c5b-kube-api-access-vtz96\") pod \"redhat-marketplace-ncpcb\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") " pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:13 crc kubenswrapper[4854]: I1125 10:53:13.015028 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:13 crc kubenswrapper[4854]: I1125 10:53:13.613463 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ncpcb"]
Nov 25 10:53:14 crc kubenswrapper[4854]: I1125 10:53:14.408359 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ncpcb" event={"ID":"d245ca87-719e-4b15-9980-d50359199c5b","Type":"ContainerStarted","Data":"ed6ec0b4352a0b5e2cb6f8dcf03f31a543a16e4c9a3bd49be8e2dce994c8e3df"}
Nov 25 10:53:15 crc kubenswrapper[4854]: I1125 10:53:15.464201 4854 generic.go:334] "Generic (PLEG): container finished" podID="d245ca87-719e-4b15-9980-d50359199c5b" containerID="dc5e99b820769581506ad10624d24623c10e2571a03c49841846440ca065d403" exitCode=0
Nov 25 10:53:15 crc kubenswrapper[4854]: I1125 10:53:15.464509 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ncpcb" event={"ID":"d245ca87-719e-4b15-9980-d50359199c5b","Type":"ContainerDied","Data":"dc5e99b820769581506ad10624d24623c10e2571a03c49841846440ca065d403"}
Nov 25 10:53:16 crc kubenswrapper[4854]: I1125 10:53:16.016792 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:53:16 crc kubenswrapper[4854]: E1125 10:53:16.017570 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:53:18 crc kubenswrapper[4854]: I1125 10:53:18.505441 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ncpcb" event={"ID":"d245ca87-719e-4b15-9980-d50359199c5b","Type":"ContainerStarted","Data":"242d08834eec576207cbd350a87200f08b5ef6aa3dd875b2ae1cf95cbf72c1c8"}
Nov 25 10:53:19 crc kubenswrapper[4854]: I1125 10:53:19.523947 4854 generic.go:334] "Generic (PLEG): container finished" podID="d245ca87-719e-4b15-9980-d50359199c5b" containerID="242d08834eec576207cbd350a87200f08b5ef6aa3dd875b2ae1cf95cbf72c1c8" exitCode=0
Nov 25 10:53:19 crc kubenswrapper[4854]: I1125 10:53:19.524044 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ncpcb" event={"ID":"d245ca87-719e-4b15-9980-d50359199c5b","Type":"ContainerDied","Data":"242d08834eec576207cbd350a87200f08b5ef6aa3dd875b2ae1cf95cbf72c1c8"}
Nov 25 10:53:20 crc kubenswrapper[4854]: I1125 10:53:20.537128 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ncpcb" event={"ID":"d245ca87-719e-4b15-9980-d50359199c5b","Type":"ContainerStarted","Data":"81f327be862160e3eedc1950e9c664dd848c448287bef5f685e5d661a876a583"}
Nov 25 10:53:20 crc kubenswrapper[4854]: I1125 10:53:20.568179 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ncpcb" podStartSLOduration=4.045022315 podStartE2EDuration="8.568156319s" podCreationTimestamp="2025-11-25 10:53:12 +0000 UTC" firstStartedPulling="2025-11-25 10:53:15.468852091 +0000 UTC m=+4601.321845467" lastFinishedPulling="2025-11-25 10:53:19.991986095 +0000 UTC m=+4605.844979471" observedRunningTime="2025-11-25 10:53:20.55688972 +0000 UTC m=+4606.409883106" watchObservedRunningTime="2025-11-25 10:53:20.568156319 +0000 UTC m=+4606.421149695"
Nov 25 10:53:23 crc kubenswrapper[4854]: I1125 10:53:23.038911 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:23 crc kubenswrapper[4854]: I1125 10:53:23.039867 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:23 crc kubenswrapper[4854]: I1125 10:53:23.337133 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:27 crc kubenswrapper[4854]: I1125 10:53:27.017879 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:53:27 crc kubenswrapper[4854]: E1125 10:53:27.018785 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:53:33 crc kubenswrapper[4854]: I1125 10:53:33.065290 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:33 crc kubenswrapper[4854]: I1125 10:53:33.118663 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ncpcb"]
Nov 25 10:53:33 crc kubenswrapper[4854]: I1125 10:53:33.698054 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ncpcb" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="registry-server" containerID="cri-o://81f327be862160e3eedc1950e9c664dd848c448287bef5f685e5d661a876a583" gracePeriod=2
Nov 25 10:53:34 crc kubenswrapper[4854]: I1125 10:53:34.710480 4854 generic.go:334] "Generic (PLEG): container finished" podID="d245ca87-719e-4b15-9980-d50359199c5b" containerID="81f327be862160e3eedc1950e9c664dd848c448287bef5f685e5d661a876a583" exitCode=0
Nov 25 10:53:34 crc kubenswrapper[4854]: I1125 10:53:34.710556 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ncpcb" event={"ID":"d245ca87-719e-4b15-9980-d50359199c5b","Type":"ContainerDied","Data":"81f327be862160e3eedc1950e9c664dd848c448287bef5f685e5d661a876a583"}
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.412522 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.565166 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-catalog-content\") pod \"d245ca87-719e-4b15-9980-d50359199c5b\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") "
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.565329 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtz96\" (UniqueName: \"kubernetes.io/projected/d245ca87-719e-4b15-9980-d50359199c5b-kube-api-access-vtz96\") pod \"d245ca87-719e-4b15-9980-d50359199c5b\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") "
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.565419 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-utilities\") pod \"d245ca87-719e-4b15-9980-d50359199c5b\" (UID: \"d245ca87-719e-4b15-9980-d50359199c5b\") "
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.566529 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-utilities" (OuterVolumeSpecName: "utilities") pod "d245ca87-719e-4b15-9980-d50359199c5b" (UID: "d245ca87-719e-4b15-9980-d50359199c5b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.588495 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d245ca87-719e-4b15-9980-d50359199c5b-kube-api-access-vtz96" (OuterVolumeSpecName: "kube-api-access-vtz96") pod "d245ca87-719e-4b15-9980-d50359199c5b" (UID: "d245ca87-719e-4b15-9980-d50359199c5b"). InnerVolumeSpecName "kube-api-access-vtz96". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.611204 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d245ca87-719e-4b15-9980-d50359199c5b" (UID: "d245ca87-719e-4b15-9980-d50359199c5b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.668209 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.668252 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtz96\" (UniqueName: \"kubernetes.io/projected/d245ca87-719e-4b15-9980-d50359199c5b-kube-api-access-vtz96\") on node \"crc\" DevicePath \"\""
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.668263 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d245ca87-719e-4b15-9980-d50359199c5b-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.724630 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ncpcb" event={"ID":"d245ca87-719e-4b15-9980-d50359199c5b","Type":"ContainerDied","Data":"ed6ec0b4352a0b5e2cb6f8dcf03f31a543a16e4c9a3bd49be8e2dce994c8e3df"}
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.724703 4854 scope.go:117] "RemoveContainer" containerID="81f327be862160e3eedc1950e9c664dd848c448287bef5f685e5d661a876a583"
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.725597 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ncpcb"
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.755313 4854 scope.go:117] "RemoveContainer" containerID="242d08834eec576207cbd350a87200f08b5ef6aa3dd875b2ae1cf95cbf72c1c8"
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.763060 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ncpcb"]
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.775095 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ncpcb"]
Nov 25 10:53:35 crc kubenswrapper[4854]: I1125 10:53:35.791667 4854 scope.go:117] "RemoveContainer" containerID="dc5e99b820769581506ad10624d24623c10e2571a03c49841846440ca065d403"
Nov 25 10:53:37 crc kubenswrapper[4854]: I1125 10:53:37.030864 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d245ca87-719e-4b15-9980-d50359199c5b" path="/var/lib/kubelet/pods/d245ca87-719e-4b15-9980-d50359199c5b/volumes"
Nov 25 10:53:38 crc kubenswrapper[4854]: I1125 10:53:38.014429 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:53:38 crc kubenswrapper[4854]: E1125 10:53:38.015496 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:53:52 crc kubenswrapper[4854]: I1125 10:53:52.013833 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:53:52 crc kubenswrapper[4854]: E1125 10:53:52.014773 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 10:54:05 crc kubenswrapper[4854]: I1125 10:54:05.022131 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3"
Nov 25 10:54:06 crc kubenswrapper[4854]: I1125 10:54:06.107878 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"238b078ea69f4c695ad8525d4505251672526f71bcf4eddb9ac319a0b1af07f6"}
Nov 25 10:56:25 crc kubenswrapper[4854]: I1125 10:56:25.031035 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:56:25 crc kubenswrapper[4854]: I1125 10:56:25.031594 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.086310 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 25 10:56:53 crc kubenswrapper[4854]: E1125 10:56:53.087368 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="registry-server"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.087386 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="registry-server"
Nov 25 10:56:53 crc kubenswrapper[4854]: E1125 10:56:53.087402 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="extract-utilities"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.087408 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="extract-utilities"
Nov 25 10:56:53 crc kubenswrapper[4854]: E1125 10:56:53.087422 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="extract-content"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.087428 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="extract-content"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.087715 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="d245ca87-719e-4b15-9980-d50359199c5b" containerName="registry-server"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.088563 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.094027 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.094027 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.094028 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.107994 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-s5gn4"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.109637 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.230638 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.230707 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.230943 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.231059 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.231206 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.231285 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.231483 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-config-data\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.231781 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.231815 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct64n\" (UniqueName: \"kubernetes.io/projected/2323d0fa-ad38-4041-b209-029ace425aa7-kube-api-access-ct64n\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.334442 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-config-data\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.334942 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.334975 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct64n\" (UniqueName: \"kubernetes.io/projected/2323d0fa-ad38-4041-b209-029ace425aa7-kube-api-access-ct64n\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.335017 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.335052 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.335172 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.335213 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.335254 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.335282 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.336008 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-config-data\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.336235 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.336309 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.336903 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.337613 4854 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.350822 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.351190 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.351566 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.362397 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct64n\" (UniqueName: \"kubernetes.io/projected/2323d0fa-ad38-4041-b209-029ace425aa7-kube-api-access-ct64n\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.382830 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " pod="openstack/tempest-tests-tempest"
Nov 25 10:56:53 crc kubenswrapper[4854]: I1125 10:56:53.456587 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 25 10:56:54 crc kubenswrapper[4854]: I1125 10:56:54.147183 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 25 10:56:54 crc kubenswrapper[4854]: I1125 10:56:54.153071 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 10:56:54 crc kubenswrapper[4854]: I1125 10:56:54.562087 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"2323d0fa-ad38-4041-b209-029ace425aa7","Type":"ContainerStarted","Data":"0fdf31fcd304d4093a74173d7a51cc83c462e369ed469efe5e661a5830eae504"}
Nov 25 10:56:55 crc kubenswrapper[4854]: I1125 10:56:55.136928 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:56:55 crc kubenswrapper[4854]: I1125 10:56:55.136989 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:57:18 crc kubenswrapper[4854]: I1125 10:57:18.476134 4854 trace.go:236] Trace[904354380]: "Calculate volume metrics of ca-trust-extracted for pod openshift-image-registry/image-registry-66df7c8f76-p64dl" (25-Nov-2025 10:57:17.142) (total time: 1331ms):
Nov 25 10:57:18 crc kubenswrapper[4854]: Trace[904354380]: [1.331694403s] [1.331694403s] END
Nov 25 10:57:25 crc kubenswrapper[4854]: I1125 10:57:25.033261 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:57:25 crc kubenswrapper[4854]: I1125 10:57:25.033890 4854 prober.go:107] "Probe failed" probeType="Liveness"
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:57:25 crc kubenswrapper[4854]: I1125 10:57:25.035295 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 10:57:25 crc kubenswrapper[4854]: I1125 10:57:25.036218 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"238b078ea69f4c695ad8525d4505251672526f71bcf4eddb9ac319a0b1af07f6"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:57:25 crc kubenswrapper[4854]: I1125 10:57:25.036287 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://238b078ea69f4c695ad8525d4505251672526f71bcf4eddb9ac319a0b1af07f6" gracePeriod=600 Nov 25 10:57:27 crc kubenswrapper[4854]: I1125 10:57:27.009918 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="238b078ea69f4c695ad8525d4505251672526f71bcf4eddb9ac319a0b1af07f6" exitCode=0 Nov 25 10:57:27 crc kubenswrapper[4854]: I1125 10:57:27.010004 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"238b078ea69f4c695ad8525d4505251672526f71bcf4eddb9ac319a0b1af07f6"} Nov 25 10:57:27 crc kubenswrapper[4854]: I1125 10:57:27.010296 4854 scope.go:117] "RemoveContainer" containerID="d68e406a269e7d1dd3fdd504a04eacc9c67b4c85d479dac6de097c6ffe2578f3" Nov 25 10:58:28 crc kubenswrapper[4854]: E1125 10:58:28.586794 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 25 10:58:28 crc kubenswrapper[4854]: E1125 10:58:28.592900 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ct64n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(2323d0fa-ad38-4041-b209-029ace425aa7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:58:28 crc kubenswrapper[4854]: E1125 10:58:28.594315 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="2323d0fa-ad38-4041-b209-029ace425aa7" Nov 25 10:58:28 crc kubenswrapper[4854]: I1125 10:58:28.828447 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"} Nov 25 10:58:28 crc kubenswrapper[4854]: E1125 10:58:28.830654 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="2323d0fa-ad38-4041-b209-029ace425aa7" Nov 25 10:58:45 crc kubenswrapper[4854]: I1125 10:58:45.093959 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 10:58:52 crc kubenswrapper[4854]: I1125 10:58:52.101500 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"2323d0fa-ad38-4041-b209-029ace425aa7","Type":"ContainerStarted","Data":"7c823a2c387f1ff53e26d01629f5717ad39f63d52b5c0f205b4448265cc87453"} Nov 25 10:58:52 crc kubenswrapper[4854]: I1125 10:58:52.125063 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=10.186810349 podStartE2EDuration="2m1.125042535s" podCreationTimestamp="2025-11-25 10:56:51 +0000 UTC" firstStartedPulling="2025-11-25 10:56:54.15282377 +0000 UTC m=+4820.005817146" lastFinishedPulling="2025-11-25 10:58:45.091055956 +0000 UTC m=+4930.944049332" observedRunningTime="2025-11-25 10:58:52.121551589 +0000 UTC m=+4937.974544965" watchObservedRunningTime="2025-11-25 10:58:52.125042535 +0000 UTC m=+4937.978035911" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.315890 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd"] Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.318521 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.321298 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.321428 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.373428 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/922067df-5525-43a3-9bd2-d3da5f4a37f4-config-volume\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.373492 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/922067df-5525-43a3-9bd2-d3da5f4a37f4-secret-volume\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.373759 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd4l4\" (UniqueName: \"kubernetes.io/projected/922067df-5525-43a3-9bd2-d3da5f4a37f4-kube-api-access-bd4l4\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.408008 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd"] Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.476205 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd4l4\" (UniqueName: \"kubernetes.io/projected/922067df-5525-43a3-9bd2-d3da5f4a37f4-kube-api-access-bd4l4\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.476463 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/922067df-5525-43a3-9bd2-d3da5f4a37f4-config-volume\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.477623 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/922067df-5525-43a3-9bd2-d3da5f4a37f4-config-volume\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.477744 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/922067df-5525-43a3-9bd2-d3da5f4a37f4-secret-volume\") pod 
\"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.493214 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/922067df-5525-43a3-9bd2-d3da5f4a37f4-secret-volume\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.494494 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd4l4\" (UniqueName: \"kubernetes.io/projected/922067df-5525-43a3-9bd2-d3da5f4a37f4-kube-api-access-bd4l4\") pod \"collect-profiles-29401140-ccnvd\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:00 crc kubenswrapper[4854]: I1125 11:00:00.642757 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:02 crc kubenswrapper[4854]: I1125 11:00:02.580024 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd"] Nov 25 11:00:03 crc kubenswrapper[4854]: I1125 11:00:03.109579 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" event={"ID":"922067df-5525-43a3-9bd2-d3da5f4a37f4","Type":"ContainerStarted","Data":"c8b070dcb276c5a1465e2a5922236e1d5f2197d09dcebc5ee402addee6883032"} Nov 25 11:00:04 crc kubenswrapper[4854]: I1125 11:00:04.122459 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" event={"ID":"922067df-5525-43a3-9bd2-d3da5f4a37f4","Type":"ContainerStarted","Data":"cb15a737186bbdf9286c69ac5af41e68e942c2bd567569d714cffcfbfdad1542"} Nov 25 11:00:04 crc kubenswrapper[4854]: I1125 11:00:04.140370 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" podStartSLOduration=4.140352812 podStartE2EDuration="4.140352812s" podCreationTimestamp="2025-11-25 11:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:00:04.139124348 +0000 UTC m=+5009.992117734" watchObservedRunningTime="2025-11-25 11:00:04.140352812 +0000 UTC m=+5009.993346188" Nov 25 11:00:05 crc kubenswrapper[4854]: I1125 11:00:05.145533 4854 generic.go:334] "Generic (PLEG): container finished" podID="922067df-5525-43a3-9bd2-d3da5f4a37f4" containerID="cb15a737186bbdf9286c69ac5af41e68e942c2bd567569d714cffcfbfdad1542" exitCode=0 Nov 25 11:00:05 crc kubenswrapper[4854]: I1125 11:00:05.145640 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" event={"ID":"922067df-5525-43a3-9bd2-d3da5f4a37f4","Type":"ContainerDied","Data":"cb15a737186bbdf9286c69ac5af41e68e942c2bd567569d714cffcfbfdad1542"} Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.654785 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.728274 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/922067df-5525-43a3-9bd2-d3da5f4a37f4-secret-volume\") pod \"922067df-5525-43a3-9bd2-d3da5f4a37f4\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.728440 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bd4l4\" (UniqueName: \"kubernetes.io/projected/922067df-5525-43a3-9bd2-d3da5f4a37f4-kube-api-access-bd4l4\") pod \"922067df-5525-43a3-9bd2-d3da5f4a37f4\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.728514 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/922067df-5525-43a3-9bd2-d3da5f4a37f4-config-volume\") pod \"922067df-5525-43a3-9bd2-d3da5f4a37f4\" (UID: \"922067df-5525-43a3-9bd2-d3da5f4a37f4\") " Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.731980 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/922067df-5525-43a3-9bd2-d3da5f4a37f4-config-volume" (OuterVolumeSpecName: "config-volume") pod "922067df-5525-43a3-9bd2-d3da5f4a37f4" (UID: "922067df-5525-43a3-9bd2-d3da5f4a37f4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.769498 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/922067df-5525-43a3-9bd2-d3da5f4a37f4-kube-api-access-bd4l4" (OuterVolumeSpecName: "kube-api-access-bd4l4") pod "922067df-5525-43a3-9bd2-d3da5f4a37f4" (UID: "922067df-5525-43a3-9bd2-d3da5f4a37f4"). InnerVolumeSpecName "kube-api-access-bd4l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.802571 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/922067df-5525-43a3-9bd2-d3da5f4a37f4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "922067df-5525-43a3-9bd2-d3da5f4a37f4" (UID: "922067df-5525-43a3-9bd2-d3da5f4a37f4"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.833435 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/922067df-5525-43a3-9bd2-d3da5f4a37f4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.833484 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bd4l4\" (UniqueName: \"kubernetes.io/projected/922067df-5525-43a3-9bd2-d3da5f4a37f4-kube-api-access-bd4l4\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4854]: I1125 11:00:09.833499 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/922067df-5525-43a3-9bd2-d3da5f4a37f4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:10 crc kubenswrapper[4854]: I1125 11:00:10.304495 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" event={"ID":"922067df-5525-43a3-9bd2-d3da5f4a37f4","Type":"ContainerDied","Data":"c8b070dcb276c5a1465e2a5922236e1d5f2197d09dcebc5ee402addee6883032"} Nov 25 11:00:10 crc kubenswrapper[4854]: I1125 11:00:10.305126 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8b070dcb276c5a1465e2a5922236e1d5f2197d09dcebc5ee402addee6883032" Nov 25 11:00:10 crc kubenswrapper[4854]: I1125 11:00:10.305239 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-ccnvd" Nov 25 11:00:10 crc kubenswrapper[4854]: I1125 11:00:10.765223 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8"] Nov 25 11:00:10 crc kubenswrapper[4854]: I1125 11:00:10.779804 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5shv8"] Nov 25 11:00:11 crc kubenswrapper[4854]: I1125 11:00:11.041572 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2" path="/var/lib/kubelet/pods/48b1d063-54c2-4ed5-8b5f-3f80fd9cd2f2/volumes" Nov 25 11:00:22 crc kubenswrapper[4854]: I1125 11:00:22.084289 4854 trace.go:236] Trace[124317251]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/redhat-operators-pqw9x" (25-Nov-2025 11:00:20.853) (total time: 1231ms): Nov 25 11:00:22 crc kubenswrapper[4854]: Trace[124317251]: [1.231091876s] [1.231091876s] END Nov 25 11:00:55 crc kubenswrapper[4854]: I1125 11:00:55.029181 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:00:55 crc kubenswrapper[4854]: I1125 11:00:55.029868 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:00:58 crc kubenswrapper[4854]: I1125 11:00:58.006183 4854 scope.go:117] "RemoveContainer" containerID="14fe6b743a3fa38bda32b25cc316915c8661ae2e7a011841b835c439022627fe" Nov 
25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.247406 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401141-2fwff"] Nov 25 11:01:00 crc kubenswrapper[4854]: E1125 11:01:00.248563 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922067df-5525-43a3-9bd2-d3da5f4a37f4" containerName="collect-profiles" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.248581 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="922067df-5525-43a3-9bd2-d3da5f4a37f4" containerName="collect-profiles" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.248900 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="922067df-5525-43a3-9bd2-d3da5f4a37f4" containerName="collect-profiles" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.249950 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.264925 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401141-2fwff"] Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.312551 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-config-data\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.312837 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-fernet-keys\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.312920 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn4l5\" (UniqueName: \"kubernetes.io/projected/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-kube-api-access-xn4l5\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.313149 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-combined-ca-bundle\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.416037 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-combined-ca-bundle\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.416241 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-config-data\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc 
kubenswrapper[4854]: I1125 11:01:00.416283 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-fernet-keys\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.416359 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn4l5\" (UniqueName: \"kubernetes.io/projected/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-kube-api-access-xn4l5\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.429607 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-fernet-keys\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.429836 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-config-data\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.435098 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-combined-ca-bundle\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.451572 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn4l5\" (UniqueName: \"kubernetes.io/projected/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-kube-api-access-xn4l5\") pod \"keystone-cron-29401141-2fwff\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:00 crc kubenswrapper[4854]: I1125 11:01:00.590637 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:01 crc kubenswrapper[4854]: I1125 11:01:01.406561 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401141-2fwff"] Nov 25 11:01:02 crc kubenswrapper[4854]: I1125 11:01:02.042038 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-2fwff" event={"ID":"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8","Type":"ContainerStarted","Data":"013f376d707bcf88af7a99429131a19b4fd508e24b4208ebd440bad76e2d84fe"} Nov 25 11:01:02 crc kubenswrapper[4854]: I1125 11:01:02.042391 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-2fwff" event={"ID":"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8","Type":"ContainerStarted","Data":"8d68ec4e19d45714137e4feb97aac3eacc3f7f15e0ae4ef1eb1383a6c7323e72"} Nov 25 11:01:02 crc kubenswrapper[4854]: I1125 11:01:02.065293 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401141-2fwff" podStartSLOduration=2.065267836 podStartE2EDuration="2.065267836s" podCreationTimestamp="2025-11-25 11:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:01:02.056926798 +0000 UTC m=+5067.909920194" watchObservedRunningTime="2025-11-25 11:01:02.065267836 +0000 UTC m=+5067.918261212" Nov 25 11:01:07 crc kubenswrapper[4854]: I1125 11:01:07.127826 4854 generic.go:334] "Generic (PLEG): container finished" podID="915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" containerID="013f376d707bcf88af7a99429131a19b4fd508e24b4208ebd440bad76e2d84fe" exitCode=0 Nov 25 11:01:07 crc kubenswrapper[4854]: I1125 11:01:07.127895 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-2fwff" event={"ID":"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8","Type":"ContainerDied","Data":"013f376d707bcf88af7a99429131a19b4fd508e24b4208ebd440bad76e2d84fe"} Nov 25 11:01:08 crc kubenswrapper[4854]: I1125 11:01:08.955727 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.047650 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-fernet-keys\") pod \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.047910 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn4l5\" (UniqueName: \"kubernetes.io/projected/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-kube-api-access-xn4l5\") pod \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.047996 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-combined-ca-bundle\") pod \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.048108 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-config-data\") pod \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\" (UID: \"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8\") " Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.054020 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-kube-api-access-xn4l5" (OuterVolumeSpecName: "kube-api-access-xn4l5") pod "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" (UID: "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8"). InnerVolumeSpecName "kube-api-access-xn4l5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.058923 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" (UID: "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.088352 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" (UID: "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.122510 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-config-data" (OuterVolumeSpecName: "config-data") pod "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" (UID: "915cfbe7-9150-4dc9-8a66-8195d7d3c4c8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.155643 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.155961 4854 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.155975 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn4l5\" (UniqueName: \"kubernetes.io/projected/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-kube-api-access-xn4l5\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.155990 4854 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/915cfbe7-9150-4dc9-8a66-8195d7d3c4c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.162207 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-2fwff" event={"ID":"915cfbe7-9150-4dc9-8a66-8195d7d3c4c8","Type":"ContainerDied","Data":"8d68ec4e19d45714137e4feb97aac3eacc3f7f15e0ae4ef1eb1383a6c7323e72"} Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.162247 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d68ec4e19d45714137e4feb97aac3eacc3f7f15e0ae4ef1eb1383a6c7323e72" Nov 25 11:01:09 crc kubenswrapper[4854]: I1125 11:01:09.162282 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-2fwff" Nov 25 11:01:25 crc kubenswrapper[4854]: I1125 11:01:25.028586 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:01:25 crc kubenswrapper[4854]: I1125 11:01:25.029117 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.082649 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tvkn9"] Nov 25 11:01:53 crc kubenswrapper[4854]: E1125 11:01:53.083795 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" containerName="keystone-cron" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.083816 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" containerName="keystone-cron" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.084055 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="915cfbe7-9150-4dc9-8a66-8195d7d3c4c8" containerName="keystone-cron" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.086212 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.096934 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvkn9"] Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.242524 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-utilities\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.242896 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2rnm\" (UniqueName: \"kubernetes.io/projected/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-kube-api-access-c2rnm\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.242933 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-catalog-content\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.346114 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-utilities\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.346160 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2rnm\" (UniqueName: \"kubernetes.io/projected/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-kube-api-access-c2rnm\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.346185 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-catalog-content\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.362330 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-catalog-content\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.372045 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-utilities\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.412081 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-c2rnm\" (UniqueName: \"kubernetes.io/projected/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-kube-api-access-c2rnm\") pod \"redhat-operators-tvkn9\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:53 crc kubenswrapper[4854]: I1125 11:01:53.712304 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:01:54 crc kubenswrapper[4854]: I1125 11:01:54.290820 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvkn9"] Nov 25 11:01:54 crc kubenswrapper[4854]: I1125 11:01:54.683160 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerStarted","Data":"76cc729fe7bd5890e7074dd6b52e60f52cd72abf1fe1d380b64c8ce2aa52e551"} Nov 25 11:01:54 crc kubenswrapper[4854]: I1125 11:01:54.683653 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerStarted","Data":"6d842cb647ac8b31b86f31abdcbef241ed6aaf1d1622ecc378ce38d5b4339006"} Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.029839 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.029913 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.029957 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.040852 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.040967 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" gracePeriod=600 Nov 25 11:01:55 crc kubenswrapper[4854]: E1125 11:01:55.185073 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:01:55 crc 
kubenswrapper[4854]: I1125 11:01:55.697645 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" exitCode=0
Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.697704 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"}
Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.697759 4854 scope.go:117] "RemoveContainer" containerID="238b078ea69f4c695ad8525d4505251672526f71bcf4eddb9ac319a0b1af07f6"
Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.698592 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:01:55 crc kubenswrapper[4854]: E1125 11:01:55.699022 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.700584 4854 generic.go:334] "Generic (PLEG): container finished" podID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerID="76cc729fe7bd5890e7074dd6b52e60f52cd72abf1fe1d380b64c8ce2aa52e551" exitCode=0
Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.700660 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerDied","Data":"76cc729fe7bd5890e7074dd6b52e60f52cd72abf1fe1d380b64c8ce2aa52e551"}
Nov 25 11:01:55 crc kubenswrapper[4854]: I1125 11:01:55.703963 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 11:01:57 crc kubenswrapper[4854]: I1125 11:01:57.726575 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerStarted","Data":"967cf107a88690aa3ea594ae1a552cdc32a93277044faf32b44242a40ec645b3"}
Nov 25 11:02:08 crc kubenswrapper[4854]: I1125 11:02:08.014183 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:02:08 crc kubenswrapper[4854]: E1125 11:02:08.015021 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:02:12 crc kubenswrapper[4854]: I1125 11:02:12.917381 4854 generic.go:334] "Generic (PLEG): container finished" podID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerID="967cf107a88690aa3ea594ae1a552cdc32a93277044faf32b44242a40ec645b3" exitCode=0
Nov 25 11:02:12 crc kubenswrapper[4854]: I1125 11:02:12.917475 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerDied","Data":"967cf107a88690aa3ea594ae1a552cdc32a93277044faf32b44242a40ec645b3"}
Nov 25 11:02:14 crc kubenswrapper[4854]: I1125 11:02:14.943383 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerStarted","Data":"53acfb905d75fc228423c03d888a5561ce21fa116c3c609cf8817c17b777afa5"}
Nov 25 11:02:15 crc kubenswrapper[4854]: I1125 11:02:15.001864 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tvkn9" podStartSLOduration=3.606307635 podStartE2EDuration="22.001843936s" podCreationTimestamp="2025-11-25 11:01:53 +0000 UTC" firstStartedPulling="2025-11-25 11:01:55.703750244 +0000 UTC m=+5121.556743610" lastFinishedPulling="2025-11-25 11:02:14.099286535 +0000 UTC m=+5139.952279911" observedRunningTime="2025-11-25 11:02:14.996752106 +0000 UTC m=+5140.849745502" watchObservedRunningTime="2025-11-25 11:02:15.001843936 +0000 UTC m=+5140.854837322"
Nov 25 11:02:19 crc kubenswrapper[4854]: I1125 11:02:19.013644 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:02:19 crc kubenswrapper[4854]: E1125 11:02:19.014740 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:02:23 crc kubenswrapper[4854]: I1125 11:02:23.712967 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkn9"
Nov 25 11:02:23 crc kubenswrapper[4854]: I1125 11:02:23.713424 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tvkn9"
Nov 25 11:02:24 crc kubenswrapper[4854]: I1125 11:02:24.769264 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:02:24 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:02:24 crc kubenswrapper[4854]: >
Nov 25 11:02:32 crc kubenswrapper[4854]: I1125 11:02:32.014737 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:02:32 crc kubenswrapper[4854]: E1125 11:02:32.015489 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:02:34 crc kubenswrapper[4854]: I1125 11:02:34.766553 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:02:34 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:02:34 crc kubenswrapper[4854]: >
Nov 25 11:02:44 crc kubenswrapper[4854]: I1125 11:02:44.013874 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:02:44 crc kubenswrapper[4854]: E1125 11:02:44.014756 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:02:44 crc kubenswrapper[4854]: I1125 11:02:44.785602 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:02:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:02:44 crc kubenswrapper[4854]: >
Nov 25 11:02:54 crc kubenswrapper[4854]: I1125 11:02:54.770425 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:02:54 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:02:54 crc kubenswrapper[4854]: >
Nov 25 11:02:59 crc kubenswrapper[4854]: I1125 11:02:59.013745 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:02:59 crc kubenswrapper[4854]: E1125 11:02:59.014480 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:03:04 crc kubenswrapper[4854]: I1125 11:03:04.776201 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:04 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:04 crc kubenswrapper[4854]: >
Nov 25 11:03:14 crc kubenswrapper[4854]: I1125 11:03:14.013714 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:03:14 crc kubenswrapper[4854]: E1125 11:03:14.014561 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:03:15 crc kubenswrapper[4854]: I1125 11:03:15.310832 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:15 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:15 crc kubenswrapper[4854]: >
Nov 25 11:03:17 crc kubenswrapper[4854]: I1125 11:03:17.545906 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="bfc85811-fd01-48df-99bf-1220134a32b2" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.210:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.420005 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dbg52"]
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.476792 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.513947 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbg52"]
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.528487 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjpc9\" (UniqueName: \"kubernetes.io/projected/eba909d9-2373-46db-91de-16a3c73f94ae-kube-api-access-qjpc9\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.528659 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-catalog-content\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.528802 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-utilities\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.631303 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjpc9\" (UniqueName: \"kubernetes.io/projected/eba909d9-2373-46db-91de-16a3c73f94ae-kube-api-access-qjpc9\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.631372 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-catalog-content\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.631425 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-utilities\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.641651 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-utilities\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.642805 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-catalog-content\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.684267 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjpc9\" (UniqueName: \"kubernetes.io/projected/eba909d9-2373-46db-91de-16a3c73f94ae-kube-api-access-qjpc9\") pod \"redhat-marketplace-dbg52\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") " pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:19 crc kubenswrapper[4854]: I1125 11:03:19.937085 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:22 crc kubenswrapper[4854]: I1125 11:03:22.594897 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="bfc85811-fd01-48df-99bf-1220134a32b2" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.210:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:03:24 crc kubenswrapper[4854]: I1125 11:03:24.370707 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbg52"]
Nov 25 11:03:24 crc kubenswrapper[4854]: I1125 11:03:24.779612 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:24 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:24 crc kubenswrapper[4854]: >
Nov 25 11:03:24 crc kubenswrapper[4854]: I1125 11:03:24.897739 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbg52" event={"ID":"eba909d9-2373-46db-91de-16a3c73f94ae","Type":"ContainerStarted","Data":"0485c9c2264b738616d97c288bf3a443e86f45c1b11c778457d50f2876d65b84"}
Nov 25 11:03:25 crc kubenswrapper[4854]: I1125 11:03:25.910099 4854 generic.go:334] "Generic (PLEG): container finished" podID="eba909d9-2373-46db-91de-16a3c73f94ae" containerID="32112ab705ea8ebd7dfdfe9be8760a12aef989826bdf9224a3383e314096f744" exitCode=0
Nov 25 11:03:25 crc kubenswrapper[4854]: I1125 11:03:25.910360 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbg52" event={"ID":"eba909d9-2373-46db-91de-16a3c73f94ae","Type":"ContainerDied","Data":"32112ab705ea8ebd7dfdfe9be8760a12aef989826bdf9224a3383e314096f744"}
Nov 25 11:03:28 crc kubenswrapper[4854]: I1125 11:03:28.018859 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:03:28 crc kubenswrapper[4854]: E1125 11:03:28.019898 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:03:32 crc kubenswrapper[4854]: I1125 11:03:32.992857 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbg52" event={"ID":"eba909d9-2373-46db-91de-16a3c73f94ae","Type":"ContainerStarted","Data":"76f612aef9dbad5786af0f23576e5e68b9a237dca99ea58989c812e8f8cb7389"}
Nov 25 11:03:34 crc kubenswrapper[4854]: I1125 11:03:34.914327 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:34 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:34 crc kubenswrapper[4854]: >
Nov 25 11:03:37 crc kubenswrapper[4854]: I1125 11:03:37.039436 4854 generic.go:334] "Generic (PLEG): container finished" podID="eba909d9-2373-46db-91de-16a3c73f94ae" containerID="76f612aef9dbad5786af0f23576e5e68b9a237dca99ea58989c812e8f8cb7389" exitCode=0
Nov 25 11:03:37 crc kubenswrapper[4854]: I1125 11:03:37.039765 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbg52" event={"ID":"eba909d9-2373-46db-91de-16a3c73f94ae","Type":"ContainerDied","Data":"76f612aef9dbad5786af0f23576e5e68b9a237dca99ea58989c812e8f8cb7389"}
Nov 25 11:03:40 crc kubenswrapper[4854]: I1125 11:03:40.013710 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:03:40 crc kubenswrapper[4854]: E1125 11:03:40.014650 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:03:42 crc kubenswrapper[4854]: I1125 11:03:42.098142 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbg52" event={"ID":"eba909d9-2373-46db-91de-16a3c73f94ae","Type":"ContainerStarted","Data":"c4fd23410847c048147a1af138bdf767b44d62f681ca63216ef6652e7f2bdb09"}
Nov 25 11:03:44 crc kubenswrapper[4854]: I1125 11:03:44.766355 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:44 crc kubenswrapper[4854]: >
Nov 25 11:03:47 crc kubenswrapper[4854]: I1125 11:03:47.770018 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="6c660812-03bd-4475-895a-d896c14ef125" containerName="ovn-northd" probeResult="failure" output="command timed out"
Nov 25 11:03:47 crc kubenswrapper[4854]: I1125 11:03:47.770955 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="0984ac84-1833-4ddb-b21b-d526b64e9991" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:03:47 crc kubenswrapper[4854]: I1125 11:03:47.771001 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="0984ac84-1833-4ddb-b21b-d526b64e9991" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:03:47 crc kubenswrapper[4854]: I1125 11:03:47.771066 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="6c660812-03bd-4475-895a-d896c14ef125" containerName="ovn-northd" probeResult="failure" output="command timed out"
Nov 25 11:03:49 crc kubenswrapper[4854]: I1125 11:03:49.992494 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:50 crc kubenswrapper[4854]: I1125 11:03:49.993327 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:03:50 crc kubenswrapper[4854]: I1125 11:03:50.448279 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-mzqj2" podUID="8ee67274-c034-4307-a53e-2655baa2d521" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:50 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:50 crc kubenswrapper[4854]: >
Nov 25 11:03:50 crc kubenswrapper[4854]: I1125 11:03:50.450570 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-mzqj2" podUID="8ee67274-c034-4307-a53e-2655baa2d521" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:50 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:50 crc kubenswrapper[4854]: >
Nov 25 11:03:51 crc kubenswrapper[4854]: I1125 11:03:51.051355 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-dbg52" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:51 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:51 crc kubenswrapper[4854]: >
Nov 25 11:03:54 crc kubenswrapper[4854]: I1125 11:03:54.773863 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:03:54 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:03:54 crc kubenswrapper[4854]: >
Nov 25 11:03:54 crc kubenswrapper[4854]: I1125 11:03:54.775097 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkn9"
Nov 25 11:03:54 crc kubenswrapper[4854]: I1125 11:03:54.777825 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"53acfb905d75fc228423c03d888a5561ce21fa116c3c609cf8817c17b777afa5"} pod="openshift-marketplace/redhat-operators-tvkn9" containerMessage="Container registry-server failed startup probe, will be restarted"
Nov 25 11:03:54 crc kubenswrapper[4854]: I1125 11:03:54.778505 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" containerID="cri-o://53acfb905d75fc228423c03d888a5561ce21fa116c3c609cf8817c17b777afa5" gracePeriod=30
Nov 25 11:03:55 crc kubenswrapper[4854]: I1125 11:03:55.171583 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:03:55 crc kubenswrapper[4854]: E1125 11:03:55.171907 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:04:01 crc kubenswrapper[4854]: I1125 11:04:01.657353 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-dbg52" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:01 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:01 crc kubenswrapper[4854]: >
Nov 25 11:04:09 crc kubenswrapper[4854]: I1125 11:04:09.015737 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:04:09 crc kubenswrapper[4854]: E1125 11:04:09.016860 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:04:11 crc kubenswrapper[4854]: I1125 11:04:11.122708 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-dbg52" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:11 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:11 crc kubenswrapper[4854]: >
Nov 25 11:04:17 crc kubenswrapper[4854]: I1125 11:04:17.775646 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="0984ac84-1833-4ddb-b21b-d526b64e9991" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:04:17 crc kubenswrapper[4854]: I1125 11:04:17.778303 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="0984ac84-1833-4ddb-b21b-d526b64e9991" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:04:20 crc kubenswrapper[4854]: I1125 11:04:20.039079 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:04:20 crc kubenswrapper[4854]: I1125 11:04:20.102031 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:04:20 crc kubenswrapper[4854]: I1125 11:04:20.107479 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dbg52" podStartSLOduration=46.108929568 podStartE2EDuration="1m1.091850866s" podCreationTimestamp="2025-11-25 11:03:19 +0000 UTC" firstStartedPulling="2025-11-25 11:03:25.912703478 +0000 UTC m=+5211.765696854" lastFinishedPulling="2025-11-25 11:03:40.895624786 +0000 UTC m=+5226.748618152" observedRunningTime="2025-11-25 11:03:42.118548847 +0000 UTC m=+5227.971542223" watchObservedRunningTime="2025-11-25 11:04:20.091850866 +0000 UTC m=+5265.944844242"
Nov 25 11:04:21 crc kubenswrapper[4854]: I1125 11:04:21.634343 4854 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.129853112s: [/var/lib/containers/storage/overlay/08dddce3661eadd6e3193d65b04bc83cbc3d930046f648a29bb4a619e4c042cf/diff /var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-gwfmz_3879e5ff-7566-4cd8-bcac-e8c07a79f965/operator/0.log]; will not log again for this container unless duration exceeds 2s
Nov 25 11:04:21 crc kubenswrapper[4854]: I1125 11:04:21.694746 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbg52"]
Nov 25 11:04:21 crc kubenswrapper[4854]: I1125 11:04:21.724464 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dbg52" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="registry-server" containerID="cri-o://c4fd23410847c048147a1af138bdf767b44d62f681ca63216ef6652e7f2bdb09" gracePeriod=2
Nov 25 11:04:22 crc kubenswrapper[4854]: I1125 11:04:22.558639 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbg52" event={"ID":"eba909d9-2373-46db-91de-16a3c73f94ae","Type":"ContainerDied","Data":"c4fd23410847c048147a1af138bdf767b44d62f681ca63216ef6652e7f2bdb09"}
Nov 25 11:04:22 crc kubenswrapper[4854]: I1125 11:04:22.562400 4854 generic.go:334] "Generic (PLEG): container finished" podID="eba909d9-2373-46db-91de-16a3c73f94ae" containerID="c4fd23410847c048147a1af138bdf767b44d62f681ca63216ef6652e7f2bdb09" exitCode=0
Nov 25 11:04:24 crc kubenswrapper[4854]: I1125 11:04:24.020328 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:04:24 crc kubenswrapper[4854]: E1125 11:04:24.026129 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.057860 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.275000 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjpc9\" (UniqueName: \"kubernetes.io/projected/eba909d9-2373-46db-91de-16a3c73f94ae-kube-api-access-qjpc9\") pod \"eba909d9-2373-46db-91de-16a3c73f94ae\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") "
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.275144 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-utilities\") pod \"eba909d9-2373-46db-91de-16a3c73f94ae\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") "
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.275286 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-catalog-content\") pod \"eba909d9-2373-46db-91de-16a3c73f94ae\" (UID: \"eba909d9-2373-46db-91de-16a3c73f94ae\") "
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.406076 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-utilities" (OuterVolumeSpecName: "utilities") pod "eba909d9-2373-46db-91de-16a3c73f94ae" (UID: "eba909d9-2373-46db-91de-16a3c73f94ae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.430381 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x8rn9"]
Nov 25 11:04:25 crc kubenswrapper[4854]: E1125 11:04:25.437208 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="registry-server"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.437240 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="registry-server"
Nov 25 11:04:25 crc kubenswrapper[4854]: E1125 11:04:25.437270 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="extract-content"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.437276 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="extract-content"
Nov 25 11:04:25 crc kubenswrapper[4854]: E1125 11:04:25.437299 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="extract-utilities"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.437305 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="extract-utilities"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.445893 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" containerName="registry-server"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.469812 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.470491 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eba909d9-2373-46db-91de-16a3c73f94ae-kube-api-access-qjpc9" (OuterVolumeSpecName: "kube-api-access-qjpc9") pod "eba909d9-2373-46db-91de-16a3c73f94ae" (UID: "eba909d9-2373-46db-91de-16a3c73f94ae"). InnerVolumeSpecName "kube-api-access-qjpc9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.503344 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjpc9\" (UniqueName: \"kubernetes.io/projected/eba909d9-2373-46db-91de-16a3c73f94ae-kube-api-access-qjpc9\") on node \"crc\" DevicePath \"\""
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.503395 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.604389 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-catalog-content\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.604497 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-utilities\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.604778 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pjj2\" (UniqueName: \"kubernetes.io/projected/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-kube-api-access-6pjj2\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.613956 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbg52"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.613969 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbg52" event={"ID":"eba909d9-2373-46db-91de-16a3c73f94ae","Type":"ContainerDied","Data":"0485c9c2264b738616d97c288bf3a443e86f45c1b11c778457d50f2876d65b84"}
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.614024 4854 scope.go:117] "RemoveContainer" containerID="c4fd23410847c048147a1af138bdf767b44d62f681ca63216ef6652e7f2bdb09"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.624409 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tvkn9_2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/registry-server/0.log"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.637470 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8rn9"]
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.707031 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pjj2\" (UniqueName: \"kubernetes.io/projected/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-kube-api-access-6pjj2\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.707086 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-catalog-content\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.707130 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-utilities\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.716592 4854 generic.go:334] "Generic (PLEG): container finished" podID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerID="53acfb905d75fc228423c03d888a5561ce21fa116c3c609cf8817c17b777afa5" exitCode=137
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.716647 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerDied","Data":"53acfb905d75fc228423c03d888a5561ce21fa116c3c609cf8817c17b777afa5"}
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.759617 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eba909d9-2373-46db-91de-16a3c73f94ae" (UID: "eba909d9-2373-46db-91de-16a3c73f94ae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.775155 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-utilities\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.776032 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-catalog-content\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.790249 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pjj2\" (UniqueName: \"kubernetes.io/projected/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-kube-api-access-6pjj2\") pod \"community-operators-x8rn9\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.810106 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba909d9-2373-46db-91de-16a3c73f94ae-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.855662 4854 scope.go:117] "RemoveContainer" containerID="76f612aef9dbad5786af0f23576e5e68b9a237dca99ea58989c812e8f8cb7389"
Nov 25 11:04:25 crc kubenswrapper[4854]: I1125 11:04:25.971568 4854 scope.go:117] "RemoveContainer" containerID="32112ab705ea8ebd7dfdfe9be8760a12aef989826bdf9224a3383e314096f744"
Nov 25 11:04:26 crc kubenswrapper[4854]: I1125 11:04:26.033387 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbg52"]
Nov 25 11:04:26 crc kubenswrapper[4854]: I1125 11:04:26.050353 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbg52"]
Nov 25 11:04:26 crc kubenswrapper[4854]: I1125 11:04:26.062597 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:26.969320 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-54qq5"]
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:26.972440 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.132169 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-catalog-content\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.132724 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdcpb\" (UniqueName: \"kubernetes.io/projected/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-kube-api-access-rdcpb\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.133181 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-utilities\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.135432 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eba909d9-2373-46db-91de-16a3c73f94ae" path="/var/lib/kubelet/pods/eba909d9-2373-46db-91de-16a3c73f94ae/volumes"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.145043 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-54qq5"]
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.235307 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-utilities\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.235516 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-catalog-content\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.235586 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdcpb\" (UniqueName: \"kubernetes.io/projected/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-kube-api-access-rdcpb\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.236837 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-catalog-content\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.237384 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-utilities\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.292868 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdcpb\" (UniqueName: \"kubernetes.io/projected/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-kube-api-access-rdcpb\") pod \"certified-operators-54qq5\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.433855 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:27 crc kubenswrapper[4854]: I1125 11:04:27.721529 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8rn9"]
Nov 25 11:04:27 crc kubenswrapper[4854]: W1125 11:04:27.862999 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode46d750e_cbe6_4e6c_ab4e_0256c54ba262.slice/crio-8351cfb1ab31245ec311ff5f94dcc2e0af5aa80c64db2f7ffba9ec62bdd68d8e WatchSource:0}: Error finding container 8351cfb1ab31245ec311ff5f94dcc2e0af5aa80c64db2f7ffba9ec62bdd68d8e: Status 404 returned error can't find the container with id 8351cfb1ab31245ec311ff5f94dcc2e0af5aa80c64db2f7ffba9ec62bdd68d8e
Nov 25 11:04:28 crc kubenswrapper[4854]: I1125 11:04:28.370243 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-54qq5"]
Nov 25 11:04:28 crc kubenswrapper[4854]: I1125 11:04:28.815035 4854 generic.go:334] "Generic (PLEG): container finished" podID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerID="45d24d402bd2e15b1883e9024c304e859fa056b8c44a7e00af5c7f2b5988c8e0" exitCode=0
Nov 25 11:04:28 crc kubenswrapper[4854]: I1125 11:04:28.815379 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rn9" event={"ID":"e46d750e-cbe6-4e6c-ab4e-0256c54ba262","Type":"ContainerDied","Data":"45d24d402bd2e15b1883e9024c304e859fa056b8c44a7e00af5c7f2b5988c8e0"}
Nov 25 11:04:28 crc kubenswrapper[4854]: I1125 11:04:28.815407 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rn9" event={"ID":"e46d750e-cbe6-4e6c-ab4e-0256c54ba262","Type":"ContainerStarted","Data":"8351cfb1ab31245ec311ff5f94dcc2e0af5aa80c64db2f7ffba9ec62bdd68d8e"}
Nov 25 11:04:28 crc kubenswrapper[4854]: I1125 11:04:28.820157 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-54qq5" event={"ID":"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff","Type":"ContainerStarted","Data":"75d1b80ff159010be726aaf99633e6b7d96e85da2e5ec473bc11f17899eed3cf"}
Nov 25 11:04:28 crc kubenswrapper[4854]: I1125 11:04:28.842183 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tvkn9_2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/registry-server/0.log"
Nov 25 11:04:28 crc kubenswrapper[4854]: I1125 11:04:28.845172 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerStarted","Data":"0a06a0a5005bae88d7e0fb4f2767ea452c6ac869c412879398b882d149ba3a53"}
Nov 25 11:04:29 crc kubenswrapper[4854]: I1125 11:04:29.855405 4854 generic.go:334] "Generic (PLEG): container finished" podID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerID="95657b1991f56b3f2e894656830746e0d60e27bb665a7e24e34c0656d9f390b8" exitCode=0
Nov 25 11:04:29 crc kubenswrapper[4854]: I1125 11:04:29.855491 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-54qq5" event={"ID":"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff","Type":"ContainerDied","Data":"95657b1991f56b3f2e894656830746e0d60e27bb665a7e24e34c0656d9f390b8"}
Nov 25 11:04:30 crc kubenswrapper[4854]: I1125 11:04:30.870303 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rn9" event={"ID":"e46d750e-cbe6-4e6c-ab4e-0256c54ba262","Type":"ContainerStarted","Data":"ee05143f485a66bce92d2fa9c6505c7161404a8b4fa7635c4d3eee63c9c00c96"}
Nov 25 11:04:31 crc kubenswrapper[4854]: I1125 11:04:31.886140 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-54qq5" event={"ID":"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff","Type":"ContainerStarted","Data":"156aa2497f689b427f46e201b987a3731260cca36d01c694678c56ccb69ae804"}
Nov 25 11:04:33 crc kubenswrapper[4854]: I1125 11:04:33.749537 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkn9"
Nov 25 11:04:33 crc kubenswrapper[4854]: I1125 11:04:33.749948 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tvkn9"
Nov 25 11:04:33 crc kubenswrapper[4854]: I1125 11:04:33.940876 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" podUID="1da6b327-2319-4a92-9555-736d992a3348" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:33 crc kubenswrapper[4854]: I1125 11:04:33.940865 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zp5bz" podUID="1da6b327-2319-4a92-9555-736d992a3348" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:34 crc kubenswrapper[4854]: I1125 11:04:34.171865 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" podUID="13c658f9-13c2-43d5-9b8a-d30484e5943f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:34 crc kubenswrapper[4854]: I1125 11:04:34.255015 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jx54m" podUID="13c658f9-13c2-43d5-9b8a-d30484e5943f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:34 crc kubenswrapper[4854]: I1125 11:04:34.273045 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" podUID="d51e017c-9c85-443a-a5b9-b8b8969bb019" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:34 crc kubenswrapper[4854]: I1125 11:04:34.273098 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" podUID="5c0186e4-d72a-4281-ab41-d012f2d4d775" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:34 crc kubenswrapper[4854]: I1125 11:04:34.273249 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-7l4fm" podUID="5c0186e4-d72a-4281-ab41-d012f2d4d775" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:34 crc kubenswrapper[4854]: I1125 11:04:34.273708 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-sks98" podUID="d51e017c-9c85-443a-a5b9-b8b8969bb019" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:34 crc kubenswrapper[4854]: I1125 11:04:34.832803 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:34 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:34 crc kubenswrapper[4854]: >
Nov 25 11:04:36 crc kubenswrapper[4854]: I1125 11:04:36.024685 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:04:36 crc kubenswrapper[4854]: E1125 11:04:36.031586 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:04:37 crc kubenswrapper[4854]: I1125 11:04:37.954952 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-54qq5" event={"ID":"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff","Type":"ContainerDied","Data":"156aa2497f689b427f46e201b987a3731260cca36d01c694678c56ccb69ae804"}
Nov 25 11:04:37 crc kubenswrapper[4854]: I1125 11:04:37.978870 4854 generic.go:334] "Generic (PLEG): container finished" podID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerID="156aa2497f689b427f46e201b987a3731260cca36d01c694678c56ccb69ae804" exitCode=0
Nov 25 11:04:40 crc kubenswrapper[4854]: I1125 11:04:40.015413 4854 generic.go:334] "Generic (PLEG): container finished" podID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerID="ee05143f485a66bce92d2fa9c6505c7161404a8b4fa7635c4d3eee63c9c00c96" exitCode=0
Nov 25 11:04:40 crc kubenswrapper[4854]: I1125 11:04:40.015516 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rn9" event={"ID":"e46d750e-cbe6-4e6c-ab4e-0256c54ba262","Type":"ContainerDied","Data":"ee05143f485a66bce92d2fa9c6505c7161404a8b4fa7635c4d3eee63c9c00c96"}
Nov 25 11:04:42 crc kubenswrapper[4854]: I1125 11:04:42.008175 4854 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-pbrtp container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 11:04:42 crc kubenswrapper[4854]: I1125 11:04:42.008143 4854 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-pbrtp container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 11:04:42 crc kubenswrapper[4854]: I1125 11:04:42.025444 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" podUID="a448acc0-ef0a-45fc-b3db-67fdc5ff24c4" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:42 crc kubenswrapper[4854]: I1125 11:04:42.025594 4854 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-pbrtp" podUID="a448acc0-ef0a-45fc-b3db-67fdc5ff24c4" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:42 crc kubenswrapper[4854]: I1125 11:04:42.337853 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-f9q69" podUID="a3c67adc-f296-4e37-a023-1c478d8abcd7" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:04:44 crc kubenswrapper[4854]: I1125 11:04:44.102055 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-54qq5" event={"ID":"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff","Type":"ContainerStarted","Data":"e3b3341ee73133399457b446e46ec7dd2bdc45eb952f8065e5bf520a8e064a1a"}
Nov 25 11:04:44 crc kubenswrapper[4854]: I1125 11:04:44.147621 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-54qq5" podStartSLOduration=5.949127956 podStartE2EDuration="18.145504892s" podCreationTimestamp="2025-11-25 11:04:26 +0000 UTC" firstStartedPulling="2025-11-25 11:04:29.85957642 +0000 UTC m=+5275.712569806" lastFinishedPulling="2025-11-25 11:04:42.055953366 +0000 UTC m=+5287.908946742" observedRunningTime="2025-11-25 11:04:44.126052809 +0000 UTC m=+5289.979046205" watchObservedRunningTime="2025-11-25 11:04:44.145504892 +0000 UTC m=+5289.998498258"
Nov 25 11:04:44 crc kubenswrapper[4854]: I1125 11:04:44.852060 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:44 crc kubenswrapper[4854]: >
Nov 25 11:04:45 crc kubenswrapper[4854]: I1125 11:04:45.151502 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rn9" event={"ID":"e46d750e-cbe6-4e6c-ab4e-0256c54ba262","Type":"ContainerStarted","Data":"e459ff790a0d4d151efc83b8621fdbd23425de07b55f4baed6ebc5a3bd9c1e50"}
Nov 25 11:04:45 crc kubenswrapper[4854]: I1125 11:04:45.215159 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x8rn9" podStartSLOduration=5.296983388 podStartE2EDuration="20.215139582s" podCreationTimestamp="2025-11-25 11:04:25 +0000 UTC" firstStartedPulling="2025-11-25 11:04:28.817796033 +0000 UTC m=+5274.670789409" lastFinishedPulling="2025-11-25 11:04:43.735952227 +0000 UTC m=+5289.588945603" observedRunningTime="2025-11-25 11:04:45.213907838 +0000 UTC m=+5291.066901224" watchObservedRunningTime="2025-11-25 11:04:45.215139582 +0000 UTC m=+5291.068132958"
Nov 25 11:04:46 crc kubenswrapper[4854]: I1125 11:04:46.064054 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:46 crc kubenswrapper[4854]: I1125 11:04:46.064130 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x8rn9"
Nov 25 11:04:47 crc kubenswrapper[4854]: I1125 11:04:47.291303 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:47 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:47 crc kubenswrapper[4854]: >
Nov 25 11:04:47 crc kubenswrapper[4854]: I1125 11:04:47.434418 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:47 crc kubenswrapper[4854]: I1125 11:04:47.434476 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-54qq5"
Nov 25 11:04:48 crc kubenswrapper[4854]: I1125 11:04:48.494364 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:48 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:48 crc kubenswrapper[4854]: >
Nov 25 11:04:51 crc kubenswrapper[4854]: I1125 11:04:51.023688 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:04:51 crc kubenswrapper[4854]: E1125 11:04:51.026072 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:04:54 crc kubenswrapper[4854]: I1125 11:04:54.794293 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:54 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:54 crc kubenswrapper[4854]: >
Nov 25 11:04:57 crc kubenswrapper[4854]: I1125 11:04:57.122574 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:57 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:57 crc kubenswrapper[4854]: >
Nov 25 11:04:58 crc kubenswrapper[4854]: I1125 11:04:58.507523 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:04:58 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:04:58 crc kubenswrapper[4854]: >
Nov 25 11:05:04 crc kubenswrapper[4854]: I1125 11:05:04.015090 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:05:04 crc kubenswrapper[4854]: E1125 11:05:04.016070 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:05:04 crc kubenswrapper[4854]: I1125 11:05:04.773112 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:04 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:04 crc kubenswrapper[4854]: >
Nov 25 11:05:07 crc kubenswrapper[4854]: I1125 11:05:07.109221 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:07 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:07 crc kubenswrapper[4854]: >
Nov 25 11:05:08 crc kubenswrapper[4854]: I1125 11:05:08.522286 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:08 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:08 crc kubenswrapper[4854]: >
Nov 25 11:05:14 crc kubenswrapper[4854]: I1125 11:05:14.786879 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:14 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:14 crc kubenswrapper[4854]: >
Nov 25 11:05:16 crc kubenswrapper[4854]: I1125 11:05:16.019562 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:05:16 crc kubenswrapper[4854]: E1125 11:05:16.020288 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:05:17 crc kubenswrapper[4854]: I1125 11:05:17.128248 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:17 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:17 crc kubenswrapper[4854]: >
Nov 25 11:05:18 crc kubenswrapper[4854]: I1125 11:05:18.511713 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:18 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:18 crc kubenswrapper[4854]: >
Nov 25 11:05:24 crc kubenswrapper[4854]: I1125 11:05:24.808149 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:24 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:24 crc kubenswrapper[4854]: >
Nov 25 11:05:27 crc kubenswrapper[4854]: I1125 11:05:27.124270 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:27 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:27 crc kubenswrapper[4854]: >
Nov 25 11:05:28 crc kubenswrapper[4854]: I1125 11:05:28.705490 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:28 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:28 crc kubenswrapper[4854]: >
Nov 25 11:05:31 crc kubenswrapper[4854]: I1125 11:05:31.020279 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:05:31 crc kubenswrapper[4854]: E1125 11:05:31.038382 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:05:33 crc kubenswrapper[4854]: I1125 11:05:33.920545 4854 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.179750668s: [/var/lib/containers/storage/overlay/67fff2cebf2ac83927bf1efb1b4d4fad423fcfc547ee6145ca5369fa37761bcc/diff /var/log/pods/openstack_openstackclient_b91ef9a0-60a5-4dd9-9239-a784c885f332/openstackclient/0.log]; will not log again for this container unless duration exceeds 2s
Nov 25 11:05:34 crc kubenswrapper[4854]: I1125 11:05:34.934779 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:34 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:34 crc kubenswrapper[4854]: >
Nov 25 11:05:37 crc kubenswrapper[4854]: I1125 11:05:37.118790 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:37 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:37 crc kubenswrapper[4854]: >
Nov 25 11:05:38 crc kubenswrapper[4854]: I1125 11:05:38.508386 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:38 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:38 crc kubenswrapper[4854]: >
Nov 25 11:05:44 crc kubenswrapper[4854]: I1125 11:05:44.805887 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:44 crc kubenswrapper[4854]: >
Nov 25 11:05:46 crc kubenswrapper[4854]: I1125 11:05:46.018811 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a"
Nov 25 11:05:46 crc kubenswrapper[4854]: E1125 11:05:46.019614 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:05:47 crc kubenswrapper[4854]: I1125 11:05:47.116362 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:47 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:47 crc kubenswrapper[4854]: >
Nov 25 11:05:50 crc kubenswrapper[4854]: I1125 11:05:50.649651 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:50 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:05:50 crc kubenswrapper[4854]: >
Nov 25 11:05:54 crc kubenswrapper[4854]: I1125 11:05:54.793013 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:05:54 crc kubenswrapper[4854]: timeout: failed to
connect service ":50051" within 1s Nov 25 11:05:54 crc kubenswrapper[4854]: > Nov 25 11:05:57 crc kubenswrapper[4854]: I1125 11:05:57.014140 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" Nov 25 11:05:57 crc kubenswrapper[4854]: E1125 11:05:57.015247 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:05:57 crc kubenswrapper[4854]: I1125 11:05:57.119175 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" probeResult="failure" output=< Nov 25 11:05:57 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:05:57 crc kubenswrapper[4854]: > Nov 25 11:05:58 crc kubenswrapper[4854]: I1125 11:05:58.493267 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" probeResult="failure" output=< Nov 25 11:05:58 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:05:58 crc kubenswrapper[4854]: > Nov 25 11:06:04 crc kubenswrapper[4854]: I1125 11:06:04.767800 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:06:04 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:06:04 crc kubenswrapper[4854]: > Nov 25 11:06:04 crc kubenswrapper[4854]: I1125 11:06:04.771862 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:06:04 crc kubenswrapper[4854]: I1125 11:06:04.776886 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"0a06a0a5005bae88d7e0fb4f2767ea452c6ac869c412879398b882d149ba3a53"} pod="openshift-marketplace/redhat-operators-tvkn9" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:06:04 crc kubenswrapper[4854]: I1125 11:06:04.777338 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" containerID="cri-o://0a06a0a5005bae88d7e0fb4f2767ea452c6ac869c412879398b882d149ba3a53" gracePeriod=30 Nov 25 11:06:06 crc kubenswrapper[4854]: I1125 11:06:06.157103 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x8rn9" Nov 25 11:06:06 crc kubenswrapper[4854]: I1125 11:06:06.218571 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x8rn9" Nov 25 11:06:06 crc kubenswrapper[4854]: I1125 11:06:06.452974 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8rn9"] Nov 25 11:06:07 crc 
kubenswrapper[4854]: I1125 11:06:07.523551 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-54qq5" Nov 25 11:06:07 crc kubenswrapper[4854]: I1125 11:06:07.585084 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-54qq5" Nov 25 11:06:08 crc kubenswrapper[4854]: I1125 11:06:08.028303 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-54qq5"] Nov 25 11:06:08 crc kubenswrapper[4854]: I1125 11:06:08.175611 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x8rn9" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" containerID="cri-o://e459ff790a0d4d151efc83b8621fdbd23425de07b55f4baed6ebc5a3bd9c1e50" gracePeriod=2 Nov 25 11:06:09 crc kubenswrapper[4854]: I1125 11:06:09.248972 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rn9" event={"ID":"e46d750e-cbe6-4e6c-ab4e-0256c54ba262","Type":"ContainerDied","Data":"e459ff790a0d4d151efc83b8621fdbd23425de07b55f4baed6ebc5a3bd9c1e50"} Nov 25 11:06:09 crc kubenswrapper[4854]: I1125 11:06:09.251889 4854 generic.go:334] "Generic (PLEG): container finished" podID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerID="e459ff790a0d4d151efc83b8621fdbd23425de07b55f4baed6ebc5a3bd9c1e50" exitCode=0 Nov 25 11:06:09 crc kubenswrapper[4854]: I1125 11:06:09.252253 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-54qq5" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" containerID="cri-o://e3b3341ee73133399457b446e46ec7dd2bdc45eb952f8065e5bf520a8e064a1a" gracePeriod=2 Nov 25 11:06:10 crc kubenswrapper[4854]: I1125 11:06:10.020041 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" Nov 25 11:06:10 crc kubenswrapper[4854]: E1125 11:06:10.024405 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:06:10 crc kubenswrapper[4854]: I1125 11:06:10.278783 4854 generic.go:334] "Generic (PLEG): container finished" podID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerID="e3b3341ee73133399457b446e46ec7dd2bdc45eb952f8065e5bf520a8e064a1a" exitCode=0 Nov 25 11:06:10 crc kubenswrapper[4854]: I1125 11:06:10.278825 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-54qq5" event={"ID":"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff","Type":"ContainerDied","Data":"e3b3341ee73133399457b446e46ec7dd2bdc45eb952f8065e5bf520a8e064a1a"} Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.293640 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rn9" event={"ID":"e46d750e-cbe6-4e6c-ab4e-0256c54ba262","Type":"ContainerDied","Data":"8351cfb1ab31245ec311ff5f94dcc2e0af5aa80c64db2f7ffba9ec62bdd68d8e"} Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.296190 4854 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="8351cfb1ab31245ec311ff5f94dcc2e0af5aa80c64db2f7ffba9ec62bdd68d8e" Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.375683 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rn9" Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.524687 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-catalog-content\") pod \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.524769 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-utilities\") pod \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.525084 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pjj2\" (UniqueName: \"kubernetes.io/projected/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-kube-api-access-6pjj2\") pod \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\" (UID: \"e46d750e-cbe6-4e6c-ab4e-0256c54ba262\") " Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.532439 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-utilities" (OuterVolumeSpecName: "utilities") pod "e46d750e-cbe6-4e6c-ab4e-0256c54ba262" (UID: "e46d750e-cbe6-4e6c-ab4e-0256c54ba262"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.613394 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-kube-api-access-6pjj2" (OuterVolumeSpecName: "kube-api-access-6pjj2") pod "e46d750e-cbe6-4e6c-ab4e-0256c54ba262" (UID: "e46d750e-cbe6-4e6c-ab4e-0256c54ba262"). InnerVolumeSpecName "kube-api-access-6pjj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.629773 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pjj2\" (UniqueName: \"kubernetes.io/projected/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-kube-api-access-6pjj2\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.629812 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.868461 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-54qq5" Nov 25 11:06:11 crc kubenswrapper[4854]: I1125 11:06:11.989345 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e46d750e-cbe6-4e6c-ab4e-0256c54ba262" (UID: "e46d750e-cbe6-4e6c-ab4e-0256c54ba262"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.045367 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdcpb\" (UniqueName: \"kubernetes.io/projected/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-kube-api-access-rdcpb\") pod \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.045506 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-catalog-content\") pod \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.045743 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-utilities\") pod \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\" (UID: \"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff\") " Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.046525 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-utilities" (OuterVolumeSpecName: "utilities") pod "ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" (UID: "ba9ffeb1-440a-4fe9-bc51-6343bc4582ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.046700 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e46d750e-cbe6-4e6c-ab4e-0256c54ba262-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.046716 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.094712 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-kube-api-access-rdcpb" (OuterVolumeSpecName: "kube-api-access-rdcpb") pod "ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" (UID: "ba9ffeb1-440a-4fe9-bc51-6343bc4582ff"). InnerVolumeSpecName "kube-api-access-rdcpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.148847 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdcpb\" (UniqueName: \"kubernetes.io/projected/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-kube-api-access-rdcpb\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.308347 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rn9" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.308373 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-54qq5" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.308478 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-54qq5" event={"ID":"ba9ffeb1-440a-4fe9-bc51-6343bc4582ff","Type":"ContainerDied","Data":"75d1b80ff159010be726aaf99633e6b7d96e85da2e5ec473bc11f17899eed3cf"} Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.311375 4854 scope.go:117] "RemoveContainer" containerID="e3b3341ee73133399457b446e46ec7dd2bdc45eb952f8065e5bf520a8e064a1a" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.312364 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" (UID: "ba9ffeb1-440a-4fe9-bc51-6343bc4582ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.353713 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.370816 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8rn9"] Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.383632 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x8rn9"] Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.512483 4854 scope.go:117] "RemoveContainer" containerID="156aa2497f689b427f46e201b987a3731260cca36d01c694678c56ccb69ae804" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.589384 4854 scope.go:117] "RemoveContainer" containerID="95657b1991f56b3f2e894656830746e0d60e27bb665a7e24e34c0656d9f390b8" Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.685715 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-54qq5"] Nov 25 11:06:12 crc kubenswrapper[4854]: I1125 11:06:12.698122 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-54qq5"] Nov 25 11:06:13 crc kubenswrapper[4854]: I1125 11:06:13.032843 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" path="/var/lib/kubelet/pods/ba9ffeb1-440a-4fe9-bc51-6343bc4582ff/volumes" Nov 25 11:06:13 crc kubenswrapper[4854]: I1125 11:06:13.039960 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" path="/var/lib/kubelet/pods/e46d750e-cbe6-4e6c-ab4e-0256c54ba262/volumes" Nov 25 11:06:21 crc kubenswrapper[4854]: I1125 11:06:21.014245 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" Nov 25 11:06:21 crc kubenswrapper[4854]: E1125 11:06:21.015404 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:06:32 crc 
kubenswrapper[4854]: I1125 11:06:32.014520 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" Nov 25 11:06:32 crc kubenswrapper[4854]: E1125 11:06:32.015313 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:06:35 crc kubenswrapper[4854]: I1125 11:06:35.954769 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tvkn9_2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/registry-server/1.log" Nov 25 11:06:35 crc kubenswrapper[4854]: I1125 11:06:35.965567 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tvkn9_2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/registry-server/0.log" Nov 25 11:06:35 crc kubenswrapper[4854]: I1125 11:06:35.978210 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerDied","Data":"0a06a0a5005bae88d7e0fb4f2767ea452c6ac869c412879398b882d149ba3a53"} Nov 25 11:06:35 crc kubenswrapper[4854]: I1125 11:06:35.979776 4854 scope.go:117] "RemoveContainer" containerID="53acfb905d75fc228423c03d888a5561ce21fa116c3c609cf8817c17b777afa5" Nov 25 11:06:35 crc kubenswrapper[4854]: I1125 11:06:35.981103 4854 generic.go:334] "Generic (PLEG): container finished" podID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerID="0a06a0a5005bae88d7e0fb4f2767ea452c6ac869c412879398b882d149ba3a53" exitCode=137 Nov 25 11:06:37 crc kubenswrapper[4854]: I1125 11:06:37.044621 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tvkn9_2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/registry-server/1.log" Nov 25 11:06:39 crc kubenswrapper[4854]: I1125 11:06:39.072532 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tvkn9_2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/registry-server/1.log" Nov 25 11:06:39 crc kubenswrapper[4854]: I1125 11:06:39.073746 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerStarted","Data":"13aa8dbde42df6e066dff0df72ea21977a923dbaa244fcee16889ffd433e15b3"} Nov 25 11:06:43 crc kubenswrapper[4854]: I1125 11:06:43.713573 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:06:43 crc kubenswrapper[4854]: I1125 11:06:43.714382 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:06:44 crc kubenswrapper[4854]: I1125 11:06:44.927745 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:06:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:06:44 crc kubenswrapper[4854]: > Nov 25 11:06:45 crc kubenswrapper[4854]: I1125 11:06:45.082192 4854 scope.go:117] "RemoveContainer" 
containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" Nov 25 11:06:45 crc kubenswrapper[4854]: E1125 11:06:45.082624 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:06:54 crc kubenswrapper[4854]: I1125 11:06:54.771751 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:06:54 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:06:54 crc kubenswrapper[4854]: > Nov 25 11:06:56 crc kubenswrapper[4854]: I1125 11:06:56.014885 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" Nov 25 11:06:57 crc kubenswrapper[4854]: I1125 11:06:57.278458 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"59b9dc5684fbe6cb863e0bbff479f5dd9642d5514beb86a0888753412497703f"} Nov 25 11:07:04 crc kubenswrapper[4854]: I1125 11:07:04.776514 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:07:04 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:07:04 crc kubenswrapper[4854]: > Nov 25 11:07:14 crc kubenswrapper[4854]: I1125 11:07:14.780971 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:07:14 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:07:14 crc kubenswrapper[4854]: > Nov 25 11:07:24 crc kubenswrapper[4854]: I1125 11:07:24.770349 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:07:24 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:07:24 crc kubenswrapper[4854]: > Nov 25 11:07:34 crc kubenswrapper[4854]: I1125 11:07:34.764209 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:07:34 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:07:34 crc kubenswrapper[4854]: > Nov 25 11:07:44 crc kubenswrapper[4854]: I1125 11:07:44.765314 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:07:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 
11:07:44 crc kubenswrapper[4854]: > Nov 25 11:07:54 crc kubenswrapper[4854]: I1125 11:07:54.763878 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:07:54 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:07:54 crc kubenswrapper[4854]: > Nov 25 11:08:04 crc kubenswrapper[4854]: I1125 11:08:04.818839 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:08:04 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:08:04 crc kubenswrapper[4854]: > Nov 25 11:08:14 crc kubenswrapper[4854]: I1125 11:08:14.765770 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:08:14 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:08:14 crc kubenswrapper[4854]: > Nov 25 11:08:14 crc kubenswrapper[4854]: I1125 11:08:14.767567 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:08:14 crc kubenswrapper[4854]: I1125 11:08:14.798728 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"13aa8dbde42df6e066dff0df72ea21977a923dbaa244fcee16889ffd433e15b3"} pod="openshift-marketplace/redhat-operators-tvkn9" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:08:14 crc kubenswrapper[4854]: I1125 11:08:14.809422 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" containerID="cri-o://13aa8dbde42df6e066dff0df72ea21977a923dbaa244fcee16889ffd433e15b3" gracePeriod=30 Nov 25 11:08:20 crc kubenswrapper[4854]: I1125 11:08:20.258607 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:08:20 crc kubenswrapper[4854]: I1125 11:08:20.672008 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tvkn9_2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/registry-server/1.log" Nov 25 11:08:20 crc kubenswrapper[4854]: I1125 11:08:20.674394 4854 generic.go:334] "Generic (PLEG): container finished" podID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerID="13aa8dbde42df6e066dff0df72ea21977a923dbaa244fcee16889ffd433e15b3" exitCode=0 Nov 25 11:08:20 crc kubenswrapper[4854]: I1125 11:08:20.675100 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerDied","Data":"13aa8dbde42df6e066dff0df72ea21977a923dbaa244fcee16889ffd433e15b3"} Nov 25 11:08:20 crc kubenswrapper[4854]: I1125 11:08:20.680115 4854 scope.go:117] "RemoveContainer" containerID="0a06a0a5005bae88d7e0fb4f2767ea452c6ac869c412879398b882d149ba3a53" Nov 25 11:08:22 crc kubenswrapper[4854]: I1125 11:08:22.711788 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerStarted","Data":"d797478f1419064fdb6899232fd642522afba557bc96493de5717b6f344c14d5"} Nov 25 11:08:23 crc kubenswrapper[4854]: I1125 11:08:23.712788 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:08:23 crc kubenswrapper[4854]: I1125 11:08:23.713127 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:08:24 crc kubenswrapper[4854]: I1125 11:08:24.771418 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:08:24 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:08:24 crc kubenswrapper[4854]: > Nov 25 11:08:34 crc kubenswrapper[4854]: I1125 11:08:34.763837 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:08:34 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:08:34 crc kubenswrapper[4854]: > Nov 25 11:08:44 crc kubenswrapper[4854]: I1125 11:08:44.850937 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:08:44 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:08:44 crc kubenswrapper[4854]: > Nov 25 11:08:55 crc kubenswrapper[4854]: I1125 11:08:55.014905 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:08:55 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:08:55 crc kubenswrapper[4854]: > Nov 25 11:09:04 crc kubenswrapper[4854]: I1125 11:09:04.774158 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:09:04 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:09:04 crc kubenswrapper[4854]: > Nov 25 11:09:14 crc kubenswrapper[4854]: I1125 11:09:14.827466 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:09:14 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:09:14 crc kubenswrapper[4854]: > Nov 25 11:09:24 crc kubenswrapper[4854]: I1125 11:09:24.768454 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:09:24 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:09:24 crc kubenswrapper[4854]: > Nov 25 11:09:25 crc kubenswrapper[4854]: I1125 11:09:25.029591 
4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:09:25 crc kubenswrapper[4854]: I1125 11:09:25.029666 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:09:34 crc kubenswrapper[4854]: I1125 11:09:34.760602 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:09:34 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:09:34 crc kubenswrapper[4854]: > Nov 25 11:09:44 crc kubenswrapper[4854]: I1125 11:09:44.317607 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:09:44 crc kubenswrapper[4854]: I1125 11:09:44.379750 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:09:44 crc kubenswrapper[4854]: I1125 11:09:44.575614 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvkn9"] Nov 25 11:09:45 crc kubenswrapper[4854]: I1125 11:09:45.674274 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvkn9" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" containerID="cri-o://d797478f1419064fdb6899232fd642522afba557bc96493de5717b6f344c14d5" gracePeriod=2 Nov 25 11:09:46 crc kubenswrapper[4854]: I1125 11:09:46.711472 4854 generic.go:334] "Generic (PLEG): container finished" podID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerID="d797478f1419064fdb6899232fd642522afba557bc96493de5717b6f344c14d5" exitCode=0 Nov 25 11:09:46 crc kubenswrapper[4854]: I1125 11:09:46.711658 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerDied","Data":"d797478f1419064fdb6899232fd642522afba557bc96493de5717b6f344c14d5"} Nov 25 11:09:46 crc kubenswrapper[4854]: I1125 11:09:46.713213 4854 scope.go:117] "RemoveContainer" containerID="13aa8dbde42df6e066dff0df72ea21977a923dbaa244fcee16889ffd433e15b3" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.537575 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.733851 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2rnm\" (UniqueName: \"kubernetes.io/projected/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-kube-api-access-c2rnm\") pod \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.734341 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-utilities\") pod \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.734400 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-catalog-content\") pod \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\" (UID: \"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2\") " Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.737057 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-utilities" (OuterVolumeSpecName: "utilities") pod "2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" (UID: "2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.737877 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkn9" event={"ID":"2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2","Type":"ContainerDied","Data":"6d842cb647ac8b31b86f31abdcbef241ed6aaf1d1622ecc378ce38d5b4339006"} Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.737922 4854 scope.go:117] "RemoveContainer" containerID="d797478f1419064fdb6899232fd642522afba557bc96493de5717b6f344c14d5" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.738031 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkn9" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.750577 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-kube-api-access-c2rnm" (OuterVolumeSpecName: "kube-api-access-c2rnm") pod "2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" (UID: "2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2"). InnerVolumeSpecName "kube-api-access-c2rnm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.767108 4854 scope.go:117] "RemoveContainer" containerID="967cf107a88690aa3ea594ae1a552cdc32a93277044faf32b44242a40ec645b3" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.838588 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.838619 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2rnm\" (UniqueName: \"kubernetes.io/projected/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-kube-api-access-c2rnm\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.855827 4854 scope.go:117] "RemoveContainer" containerID="76cc729fe7bd5890e7074dd6b52e60f52cd72abf1fe1d380b64c8ce2aa52e551" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.875969 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" (UID: "2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:09:48 crc kubenswrapper[4854]: I1125 11:09:48.940533 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:49 crc kubenswrapper[4854]: I1125 11:09:49.114187 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvkn9"] Nov 25 11:09:49 crc kubenswrapper[4854]: I1125 11:09:49.127327 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tvkn9"] Nov 25 11:09:51 crc kubenswrapper[4854]: I1125 11:09:51.031817 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" path="/var/lib/kubelet/pods/2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2/volumes" Nov 25 11:09:55 crc kubenswrapper[4854]: I1125 11:09:55.076607 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:09:55 crc kubenswrapper[4854]: I1125 11:09:55.077235 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:10:25 crc kubenswrapper[4854]: I1125 11:10:25.029436 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:10:25 crc kubenswrapper[4854]: I1125 11:10:25.030035 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" 
podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:10:25 crc kubenswrapper[4854]: I1125 11:10:25.034874 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 11:10:25 crc kubenswrapper[4854]: I1125 11:10:25.035971 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"59b9dc5684fbe6cb863e0bbff479f5dd9642d5514beb86a0888753412497703f"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:10:25 crc kubenswrapper[4854]: I1125 11:10:25.036059 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://59b9dc5684fbe6cb863e0bbff479f5dd9642d5514beb86a0888753412497703f" gracePeriod=600 Nov 25 11:10:26 crc kubenswrapper[4854]: I1125 11:10:26.175501 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="59b9dc5684fbe6cb863e0bbff479f5dd9642d5514beb86a0888753412497703f" exitCode=0 Nov 25 11:10:26 crc kubenswrapper[4854]: I1125 11:10:26.175585 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"59b9dc5684fbe6cb863e0bbff479f5dd9642d5514beb86a0888753412497703f"} Nov 25 11:10:26 crc kubenswrapper[4854]: I1125 11:10:26.176106 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"} Nov 25 11:10:26 crc kubenswrapper[4854]: I1125 11:10:26.176130 4854 scope.go:117] "RemoveContainer" containerID="28aef1fe7456732048d85d480eef2daebd6cab8e5a37cc28235f3301c14e874a" Nov 25 11:11:00 crc kubenswrapper[4854]: I1125 11:11:00.118878 4854 scope.go:117] "RemoveContainer" containerID="45d24d402bd2e15b1883e9024c304e859fa056b8c44a7e00af5c7f2b5988c8e0" Nov 25 11:11:00 crc kubenswrapper[4854]: I1125 11:11:00.150998 4854 scope.go:117] "RemoveContainer" containerID="e459ff790a0d4d151efc83b8621fdbd23425de07b55f4baed6ebc5a3bd9c1e50" Nov 25 11:11:00 crc kubenswrapper[4854]: I1125 11:11:00.204475 4854 scope.go:117] "RemoveContainer" containerID="ee05143f485a66bce92d2fa9c6505c7161404a8b4fa7635c4d3eee63c9c00c96" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.075408 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8svf6"] Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080384 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080415 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080429 4854 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080438 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080456 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="extract-utilities" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080464 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="extract-utilities" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080484 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="extract-utilities" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080492 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="extract-utilities" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080507 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="extract-content" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080514 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="extract-content" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080534 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080541 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080554 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="extract-content" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080563 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="extract-content" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080580 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="extract-utilities" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080588 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="extract-utilities" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080605 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080613 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.080629 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="extract-content" Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080636 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="extract-content" Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 
11:12:28.080646 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.080653 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.082101 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.082142 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.082161 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.082178 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="e46d750e-cbe6-4e6c-ab4e-0256c54ba262" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.082190 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba9ffeb1-440a-4fe9-bc51-6343bc4582ff" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: E1125 11:12:28.082527 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.082540 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.082863 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a83c57f-bc4c-41a8-ada5-4e80fc65e8a2" containerName="registry-server"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.105004 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.133658 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qflr7\" (UniqueName: \"kubernetes.io/projected/bb3ee979-be10-4582-9829-9684d7872059-kube-api-access-qflr7\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.133757 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-utilities\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.133817 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-catalog-content\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.183511 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8svf6"]
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.236529 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qflr7\" (UniqueName: \"kubernetes.io/projected/bb3ee979-be10-4582-9829-9684d7872059-kube-api-access-qflr7\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.236598 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-utilities\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.236660 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-catalog-content\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.240826 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-utilities\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.240833 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-catalog-content\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.579303 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qflr7\" (UniqueName: \"kubernetes.io/projected/bb3ee979-be10-4582-9829-9684d7872059-kube-api-access-qflr7\") pod \"redhat-operators-8svf6\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") " pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:28 crc kubenswrapper[4854]: I1125 11:12:28.739096 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:29 crc kubenswrapper[4854]: I1125 11:12:29.636921 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8svf6"]
Nov 25 11:12:29 crc kubenswrapper[4854]: I1125 11:12:29.687123 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8svf6" event={"ID":"bb3ee979-be10-4582-9829-9684d7872059","Type":"ContainerStarted","Data":"292b509af5c01854decff3af6438b36c28b7b978d8bd8da8d3a0c05c116c94f6"}
Nov 25 11:12:30 crc kubenswrapper[4854]: I1125 11:12:30.699391 4854 generic.go:334] "Generic (PLEG): container finished" podID="bb3ee979-be10-4582-9829-9684d7872059" containerID="431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b" exitCode=0
Nov 25 11:12:30 crc kubenswrapper[4854]: I1125 11:12:30.699505 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8svf6" event={"ID":"bb3ee979-be10-4582-9829-9684d7872059","Type":"ContainerDied","Data":"431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b"}
Nov 25 11:12:33 crc kubenswrapper[4854]: I1125 11:12:33.740302 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8svf6" event={"ID":"bb3ee979-be10-4582-9829-9684d7872059","Type":"ContainerStarted","Data":"7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8"}
Nov 25 11:12:48 crc kubenswrapper[4854]: I1125 11:12:48.927105 4854 generic.go:334] "Generic (PLEG): container finished" podID="bb3ee979-be10-4582-9829-9684d7872059" containerID="7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8" exitCode=0
Nov 25 11:12:48 crc kubenswrapper[4854]: I1125 11:12:48.927305 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8svf6" event={"ID":"bb3ee979-be10-4582-9829-9684d7872059","Type":"ContainerDied","Data":"7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8"}
Nov 25 11:12:49 crc kubenswrapper[4854]: I1125 11:12:49.940735 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8svf6" event={"ID":"bb3ee979-be10-4582-9829-9684d7872059","Type":"ContainerStarted","Data":"33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189"}
Nov 25 11:12:49 crc kubenswrapper[4854]: I1125 11:12:49.975129 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8svf6" podStartSLOduration=4.34008446 podStartE2EDuration="22.973576074s" podCreationTimestamp="2025-11-25 11:12:27 +0000 UTC" firstStartedPulling="2025-11-25 11:12:30.702912697 +0000 UTC m=+5756.555906073" lastFinishedPulling="2025-11-25 11:12:49.336404311 +0000 UTC m=+5775.189397687" observedRunningTime="2025-11-25 11:12:49.95918899 +0000 UTC m=+5775.812182356" watchObservedRunningTime="2025-11-25 11:12:49.973576074 +0000 UTC m=+5775.826569450"
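
A note on the two durations in the "Observed pod startup duration" entry above: podStartE2EDuration is the observed running time minus the pod creation timestamp, and podStartSLOduration is that same interval minus the image-pull window. A minimal sketch in Go, using only the timestamps logged above (illustrative arithmetic, not the kubelet's pod_startup_latency_tracker code):

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-25 11:12:27 +0000 UTC")             // podCreationTimestamp
	firstPull := parse("2025-11-25 11:12:30.702912697 +0000 UTC") // firstStartedPulling
	lastPull := parse("2025-11-25 11:12:49.336404311 +0000 UTC")  // lastFinishedPulling
	running := parse("2025-11-25 11:12:49.973576074 +0000 UTC")   // watchObservedRunningTime

	e2e := running.Sub(created)          // 22.973576074s
	slo := e2e - lastPull.Sub(firstPull) // 4.34008446s: E2E minus the time spent pulling images
	fmt.Println("podStartE2EDuration:", e2e)
	fmt.Println("podStartSLOduration:", slo)
}
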
Nov 25 11:12:55 crc kubenswrapper[4854]: I1125 11:12:55.028913 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:12:55 crc kubenswrapper[4854]: I1125 11:12:55.029435 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:12:58 crc kubenswrapper[4854]: I1125 11:12:58.741795 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:58 crc kubenswrapper[4854]: I1125 11:12:58.743453 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:12:59 crc kubenswrapper[4854]: I1125 11:12:59.807511 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8svf6" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:59 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:59 crc kubenswrapper[4854]: >
Nov 25 11:13:09 crc kubenswrapper[4854]: I1125 11:13:09.790495 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8svf6" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:09 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:09 crc kubenswrapper[4854]: >
Nov 25 11:13:20 crc kubenswrapper[4854]: I1125 11:13:20.065819 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8svf6" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:20 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:20 crc kubenswrapper[4854]: >
Nov 25 11:13:25 crc kubenswrapper[4854]: I1125 11:13:25.028423 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:13:25 crc kubenswrapper[4854]: I1125 11:13:25.029855 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:13:29 crc kubenswrapper[4854]: I1125 11:13:29.791164 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8svf6" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:29 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:29 crc kubenswrapper[4854]: >
Nov 25 11:13:38 crc kubenswrapper[4854]: I1125 11:13:38.792420 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8svf6"
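
The repeated Startup-probe failures above all report timeout: failed to connect service ":50051" within 1s, so the registry-server container is not considered started until its gRPC port accepts connections. A minimal sketch of that kind of reachability check, assuming a plain TCP dial as a stand-in for whatever probe binary the container actually runs:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// One probe attempt: connect to the catalog gRPC port with a 1s budget.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", time.Second)
	if err != nil {
		fmt.Printf("timeout: failed to connect service %q within 1s (%v)\n", ":50051", err)
		return
	}
	conn.Close()
	fmt.Println("registry-server is accepting connections; probe passes")
}
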
Nov 25 11:13:38 crc kubenswrapper[4854]: I1125 11:13:38.854382 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:13:39 crc kubenswrapper[4854]: I1125 11:13:39.031078 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8svf6"]
Nov 25 11:13:40 crc kubenswrapper[4854]: I1125 11:13:40.522588 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8svf6" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server" containerID="cri-o://33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189" gracePeriod=2
Nov 25 11:13:40 crc kubenswrapper[4854]: E1125 11:13:40.738146 4854 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb3ee979_be10_4582_9829_9684d7872059.slice/crio-conmon-33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.150653 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.242461 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-utilities\") pod \"bb3ee979-be10-4582-9829-9684d7872059\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") "
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.242789 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qflr7\" (UniqueName: \"kubernetes.io/projected/bb3ee979-be10-4582-9829-9684d7872059-kube-api-access-qflr7\") pod \"bb3ee979-be10-4582-9829-9684d7872059\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") "
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.242890 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-catalog-content\") pod \"bb3ee979-be10-4582-9829-9684d7872059\" (UID: \"bb3ee979-be10-4582-9829-9684d7872059\") "
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.249929 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-utilities" (OuterVolumeSpecName: "utilities") pod "bb3ee979-be10-4582-9829-9684d7872059" (UID: "bb3ee979-be10-4582-9829-9684d7872059"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.259898 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb3ee979-be10-4582-9829-9684d7872059-kube-api-access-qflr7" (OuterVolumeSpecName: "kube-api-access-qflr7") pod "bb3ee979-be10-4582-9829-9684d7872059" (UID: "bb3ee979-be10-4582-9829-9684d7872059"). InnerVolumeSpecName "kube-api-access-qflr7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.345916 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.345953 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qflr7\" (UniqueName: \"kubernetes.io/projected/bb3ee979-be10-4582-9829-9684d7872059-kube-api-access-qflr7\") on node \"crc\" DevicePath \"\""
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.423725 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb3ee979-be10-4582-9829-9684d7872059" (UID: "bb3ee979-be10-4582-9829-9684d7872059"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.448930 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb3ee979-be10-4582-9829-9684d7872059-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.544237 4854 generic.go:334] "Generic (PLEG): container finished" podID="bb3ee979-be10-4582-9829-9684d7872059" containerID="33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189" exitCode=0
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.544292 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8svf6"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.544325 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8svf6" event={"ID":"bb3ee979-be10-4582-9829-9684d7872059","Type":"ContainerDied","Data":"33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189"}
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.544392 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8svf6" event={"ID":"bb3ee979-be10-4582-9829-9684d7872059","Type":"ContainerDied","Data":"292b509af5c01854decff3af6438b36c28b7b978d8bd8da8d3a0c05c116c94f6"}
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.544426 4854 scope.go:117] "RemoveContainer" containerID="33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.604921 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8svf6"]
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.613246 4854 scope.go:117] "RemoveContainer" containerID="7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.620773 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8svf6"]
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.686032 4854 scope.go:117] "RemoveContainer" containerID="431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.718593 4854 scope.go:117] "RemoveContainer" containerID="33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189"
Nov 25 11:13:41 crc kubenswrapper[4854]: E1125 11:13:41.720088 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189\": container with ID starting with 33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189 not found: ID does not exist" containerID="33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.720132 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189"} err="failed to get container status \"33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189\": rpc error: code = NotFound desc = could not find container \"33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189\": container with ID starting with 33d93a3b150e95ed9ddc96a7bd04e6eccc7ca22467ee7af84c9105e6ea474189 not found: ID does not exist"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.720159 4854 scope.go:117] "RemoveContainer" containerID="7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8"
Nov 25 11:13:41 crc kubenswrapper[4854]: E1125 11:13:41.720544 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8\": container with ID starting with 7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8 not found: ID does not exist" containerID="7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.720575 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8"} err="failed to get container status \"7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8\": rpc error: code = NotFound desc = could not find container \"7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8\": container with ID starting with 7d0e0325882441d3e30f50c50f63b9c5a65328e8314e9b1b189744b489fef0e8 not found: ID does not exist"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.720592 4854 scope.go:117] "RemoveContainer" containerID="431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b"
Nov 25 11:13:41 crc kubenswrapper[4854]: E1125 11:13:41.720946 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b\": container with ID starting with 431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b not found: ID does not exist" containerID="431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b"
Nov 25 11:13:41 crc kubenswrapper[4854]: I1125 11:13:41.720989 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b"} err="failed to get container status \"431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b\": rpc error: code = NotFound desc = could not find container \"431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b\": container with ID starting with 431d5b9d98acbd52c0357f9aea6442e3770c6348a2258155d777f99daa421a2b not found: ID does not exist"
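
The E/I pairs above ("ContainerStatus from runtime service failed" with NotFound, then "DeleteContainer returned error") are benign: the containers were already gone when the deletion retried. Cleanup of this kind typically treats NotFound as success so it stays idempotent. A minimal sketch of that pattern, with a hypothetical stand-in for the CRI client rather than the kubelet's actual types:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// runtimeClient is a hypothetical stand-in for the CRI runtime service.
type runtimeClient interface {
	RemoveContainer(ctx context.Context, id string) error
}

// removeContainerIdempotent ignores NotFound so racing or repeated
// deletions of the same container ID still count as success.
func removeContainerIdempotent(ctx context.Context, rc runtimeClient, id string) error {
	if err := rc.RemoveContainer(ctx, id); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}

type fakeRuntime struct{}

func (fakeRuntime) RemoveContainer(ctx context.Context, id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func main() {
	err := removeContainerIdempotent(context.Background(), fakeRuntime{}, "33d93a3b150e")
	fmt.Println("removal error:", err) // nil: the NotFound was swallowed
}
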
Nov 25 11:13:43 crc kubenswrapper[4854]: I1125 11:13:43.028942 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb3ee979-be10-4582-9829-9684d7872059" path="/var/lib/kubelet/pods/bb3ee979-be10-4582-9829-9684d7872059/volumes"
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.028559 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.029254 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.031713 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4"
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.033823 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.034012 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" gracePeriod=600
Nov 25 11:13:55 crc kubenswrapper[4854]: E1125 11:13:55.161313 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.704085 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" exitCode=0
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.704127 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"}
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.704465 4854 scope.go:117] "RemoveContainer" containerID="59b9dc5684fbe6cb863e0bbff479f5dd9642d5514beb86a0888753412497703f"
Nov 25 11:13:55 crc kubenswrapper[4854]: I1125 11:13:55.705338 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:13:55 crc kubenswrapper[4854]: E1125 11:13:55.705702 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:14:11 crc kubenswrapper[4854]: I1125 11:14:11.013808 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:14:11 crc kubenswrapper[4854]: E1125 11:14:11.015066 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:14:22 crc kubenswrapper[4854]: I1125 11:14:22.014329 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:14:22 crc kubenswrapper[4854]: E1125 11:14:22.015453 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:14:34 crc kubenswrapper[4854]: I1125 11:14:34.013201 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:14:34 crc kubenswrapper[4854]: E1125 11:14:34.014098 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:14:49 crc kubenswrapper[4854]: I1125 11:14:49.014449 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:14:49 crc kubenswrapper[4854]: E1125 11:14:49.015236 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.253863 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"]
Nov 25 11:15:00 crc kubenswrapper[4854]: E1125 11:15:00.255394 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="extract-utilities"
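
The repeated "back-off 5m0s restarting failed container" errors above show the kubelet's restart backoff for machine-config-daemon sitting at its ceiling. A minimal sketch of the usual policy, assuming the upstream defaults of a 10s initial delay doubling up to a 5m cap (values assumed from upstream Kubernetes, not read from this cluster's configuration):

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initialDelay = 10 * time.Second
		maxDelay     = 5 * time.Minute
	)
	delay := initialDelay
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: wait %s before StartContainer\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // later restarts all wait the full 5m0s
		}
	}
}
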
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.255434 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="extract-utilities"
Nov 25 11:15:00 crc kubenswrapper[4854]: E1125 11:15:00.255463 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="extract-content"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.255472 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="extract-content"
Nov 25 11:15:00 crc kubenswrapper[4854]: E1125 11:15:00.255488 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.255496 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.255949 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb3ee979-be10-4582-9829-9684d7872059" containerName="registry-server"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.257159 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.278274 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"]
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.285723 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cea3c0af-01f6-4da7-aee1-2cae674e3846-config-volume\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.285809 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbf46\" (UniqueName: \"kubernetes.io/projected/cea3c0af-01f6-4da7-aee1-2cae674e3846-kube-api-access-gbf46\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.285988 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.286020 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.285997 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cea3c0af-01f6-4da7-aee1-2cae674e3846-secret-volume\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.388819 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbf46\" (UniqueName: \"kubernetes.io/projected/cea3c0af-01f6-4da7-aee1-2cae674e3846-kube-api-access-gbf46\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.389054 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cea3c0af-01f6-4da7-aee1-2cae674e3846-secret-volume\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.389176 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cea3c0af-01f6-4da7-aee1-2cae674e3846-config-volume\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.390131 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cea3c0af-01f6-4da7-aee1-2cae674e3846-config-volume\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.971212 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cea3c0af-01f6-4da7-aee1-2cae674e3846-secret-volume\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:00 crc kubenswrapper[4854]: I1125 11:15:00.976119 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbf46\" (UniqueName: \"kubernetes.io/projected/cea3c0af-01f6-4da7-aee1-2cae674e3846-kube-api-access-gbf46\") pod \"collect-profiles-29401155-t49jb\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
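
The reconciler entries above all follow one pattern: volumes named by the pod spec are first verified attached (reconciler_common.go:245), then mounted (reconciler_common.go:218), and MountVolume.SetUp reports completion; on pod deletion the same loop runs in reverse with UnmountVolume. A minimal sketch of that desired-state versus actual-state diff, with illustrative types rather than the volume manager's real ones:

package main

import "fmt"

// reconcile mounts whatever is desired but absent and unmounts
// whatever is present but no longer desired.
func reconcile(desired, actual map[string]bool) {
	for vol := range desired {
		if !actual[vol] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", vol)
		}
	}
	for vol := range actual {
		if !desired[vol] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", vol)
		}
	}
}

func main() {
	desired := map[string]bool{"config-volume": true, "secret-volume": true, "kube-api-access-gbf46": true}
	actual := map[string]bool{} // fresh pod: nothing mounted yet
	reconcile(desired, actual)
}
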
Nov 25 11:15:01 crc kubenswrapper[4854]: I1125 11:15:01.195338 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:01 crc kubenswrapper[4854]: I1125 11:15:01.713595 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"]
Nov 25 11:15:02 crc kubenswrapper[4854]: I1125 11:15:02.013553 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:15:02 crc kubenswrapper[4854]: E1125 11:15:02.014232 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:15:02 crc kubenswrapper[4854]: I1125 11:15:02.508543 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb" event={"ID":"cea3c0af-01f6-4da7-aee1-2cae674e3846","Type":"ContainerStarted","Data":"7294dd576937207a9f1157581fd2a028a4f3a9097fa970c6cebb235968ab92f7"}
Nov 25 11:15:02 crc kubenswrapper[4854]: I1125 11:15:02.509420 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb" event={"ID":"cea3c0af-01f6-4da7-aee1-2cae674e3846","Type":"ContainerStarted","Data":"f1eabc3766197ef09ff2d849e5c0371b228fa1472675c729e4edfd16cfd96e2d"}
Nov 25 11:15:02 crc kubenswrapper[4854]: I1125 11:15:02.540852 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb" podStartSLOduration=2.540744815 podStartE2EDuration="2.540744815s" podCreationTimestamp="2025-11-25 11:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:15:02.528922471 +0000 UTC m=+5908.381915867" watchObservedRunningTime="2025-11-25 11:15:02.540744815 +0000 UTC m=+5908.393738191"
Nov 25 11:15:03 crc kubenswrapper[4854]: I1125 11:15:03.525066 4854 generic.go:334] "Generic (PLEG): container finished" podID="cea3c0af-01f6-4da7-aee1-2cae674e3846" containerID="7294dd576937207a9f1157581fd2a028a4f3a9097fa970c6cebb235968ab92f7" exitCode=0
Nov 25 11:15:03 crc kubenswrapper[4854]: I1125 11:15:03.525137 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb" event={"ID":"cea3c0af-01f6-4da7-aee1-2cae674e3846","Type":"ContainerDied","Data":"7294dd576937207a9f1157581fd2a028a4f3a9097fa970c6cebb235968ab92f7"}
Nov 25 11:15:04 crc kubenswrapper[4854]: I1125 11:15:04.983046 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.037226 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cea3c0af-01f6-4da7-aee1-2cae674e3846-secret-volume\") pod \"cea3c0af-01f6-4da7-aee1-2cae674e3846\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") "
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.037933 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbf46\" (UniqueName: \"kubernetes.io/projected/cea3c0af-01f6-4da7-aee1-2cae674e3846-kube-api-access-gbf46\") pod \"cea3c0af-01f6-4da7-aee1-2cae674e3846\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") "
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.038333 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cea3c0af-01f6-4da7-aee1-2cae674e3846-config-volume\") pod \"cea3c0af-01f6-4da7-aee1-2cae674e3846\" (UID: \"cea3c0af-01f6-4da7-aee1-2cae674e3846\") "
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.039141 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cea3c0af-01f6-4da7-aee1-2cae674e3846-config-volume" (OuterVolumeSpecName: "config-volume") pod "cea3c0af-01f6-4da7-aee1-2cae674e3846" (UID: "cea3c0af-01f6-4da7-aee1-2cae674e3846"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.044502 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cea3c0af-01f6-4da7-aee1-2cae674e3846-kube-api-access-gbf46" (OuterVolumeSpecName: "kube-api-access-gbf46") pod "cea3c0af-01f6-4da7-aee1-2cae674e3846" (UID: "cea3c0af-01f6-4da7-aee1-2cae674e3846"). InnerVolumeSpecName "kube-api-access-gbf46". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.047525 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cea3c0af-01f6-4da7-aee1-2cae674e3846-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cea3c0af-01f6-4da7-aee1-2cae674e3846" (UID: "cea3c0af-01f6-4da7-aee1-2cae674e3846"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.142753 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cea3c0af-01f6-4da7-aee1-2cae674e3846-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.142833 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cea3c0af-01f6-4da7-aee1-2cae674e3846-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.142871 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbf46\" (UniqueName: \"kubernetes.io/projected/cea3c0af-01f6-4da7-aee1-2cae674e3846-kube-api-access-gbf46\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.550537 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb" event={"ID":"cea3c0af-01f6-4da7-aee1-2cae674e3846","Type":"ContainerDied","Data":"f1eabc3766197ef09ff2d849e5c0371b228fa1472675c729e4edfd16cfd96e2d"}
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.550588 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1eabc3766197ef09ff2d849e5c0371b228fa1472675c729e4edfd16cfd96e2d"
Nov 25 11:15:05 crc kubenswrapper[4854]: I1125 11:15:05.550646 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-t49jb"
Nov 25 11:15:06 crc kubenswrapper[4854]: I1125 11:15:06.230761 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq"]
Nov 25 11:15:06 crc kubenswrapper[4854]: I1125 11:15:06.242190 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-s5krq"]
Nov 25 11:15:07 crc kubenswrapper[4854]: I1125 11:15:07.028180 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5818cf0-e565-45d7-a13b-8a6e5a9a7a45" path="/var/lib/kubelet/pods/a5818cf0-e565-45d7-a13b-8a6e5a9a7a45/volumes"
Nov 25 11:15:15 crc kubenswrapper[4854]: I1125 11:15:15.022854 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:15:15 crc kubenswrapper[4854]: E1125 11:15:15.023737 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:15:30 crc kubenswrapper[4854]: I1125 11:15:30.013880 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:15:30 crc kubenswrapper[4854]: E1125 11:15:30.014820 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.315440 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tkbkp"]
Nov 25 11:15:34 crc kubenswrapper[4854]: E1125 11:15:34.316612 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea3c0af-01f6-4da7-aee1-2cae674e3846" containerName="collect-profiles"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.316628 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea3c0af-01f6-4da7-aee1-2cae674e3846" containerName="collect-profiles"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.316930 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="cea3c0af-01f6-4da7-aee1-2cae674e3846" containerName="collect-profiles"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.318993 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.331160 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tkbkp"]
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.434361 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-utilities\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.434425 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-catalog-content\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.434824 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lphr6\" (UniqueName: \"kubernetes.io/projected/dacf4cba-0662-4f35-a33b-6802b143d3b7-kube-api-access-lphr6\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.537778 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-utilities\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.537839 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-catalog-content\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.537967 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lphr6\" (UniqueName: \"kubernetes.io/projected/dacf4cba-0662-4f35-a33b-6802b143d3b7-kube-api-access-lphr6\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.538550 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-utilities\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.538725 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-catalog-content\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.584841 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lphr6\" (UniqueName: \"kubernetes.io/projected/dacf4cba-0662-4f35-a33b-6802b143d3b7-kube-api-access-lphr6\") pod \"certified-operators-tkbkp\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") " pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:34 crc kubenswrapper[4854]: I1125 11:15:34.647826 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:35 crc kubenswrapper[4854]: I1125 11:15:35.123165 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tkbkp"]
Nov 25 11:15:35 crc kubenswrapper[4854]: I1125 11:15:35.905115 4854 generic.go:334] "Generic (PLEG): container finished" podID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerID="9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1" exitCode=0
Nov 25 11:15:35 crc kubenswrapper[4854]: I1125 11:15:35.905400 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkbkp" event={"ID":"dacf4cba-0662-4f35-a33b-6802b143d3b7","Type":"ContainerDied","Data":"9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1"}
Nov 25 11:15:35 crc kubenswrapper[4854]: I1125 11:15:35.905428 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkbkp" event={"ID":"dacf4cba-0662-4f35-a33b-6802b143d3b7","Type":"ContainerStarted","Data":"90514a1450e604e82bd96c9798fad4a2884946f70faead7650ca0704c530b6f1"}
Nov 25 11:15:35 crc kubenswrapper[4854]: I1125 11:15:35.907639 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 11:15:36 crc kubenswrapper[4854]: I1125 11:15:36.919765 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkbkp" event={"ID":"dacf4cba-0662-4f35-a33b-6802b143d3b7","Type":"ContainerStarted","Data":"4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4"}
Nov 25 11:15:38 crc kubenswrapper[4854]: I1125 11:15:38.941792 4854 generic.go:334] "Generic (PLEG): container finished" podID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerID="4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4" exitCode=0
Nov 25 11:15:38 crc kubenswrapper[4854]: I1125 11:15:38.941855 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkbkp" event={"ID":"dacf4cba-0662-4f35-a33b-6802b143d3b7","Type":"ContainerDied","Data":"4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4"}
Nov 25 11:15:39 crc kubenswrapper[4854]: I1125 11:15:39.957251 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkbkp" event={"ID":"dacf4cba-0662-4f35-a33b-6802b143d3b7","Type":"ContainerStarted","Data":"61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021"}
Nov 25 11:15:39 crc kubenswrapper[4854]: I1125 11:15:39.984725 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tkbkp" podStartSLOduration=2.40960085 podStartE2EDuration="5.984702781s" podCreationTimestamp="2025-11-25 11:15:34 +0000 UTC" firstStartedPulling="2025-11-25 11:15:35.907438059 +0000 UTC m=+5941.760431435" lastFinishedPulling="2025-11-25 11:15:39.48253999 +0000 UTC m=+5945.335533366" observedRunningTime="2025-11-25 11:15:39.977010081 +0000 UTC m=+5945.830003487" watchObservedRunningTime="2025-11-25 11:15:39.984702781 +0000 UTC m=+5945.837696167"
Nov 25 11:15:44 crc kubenswrapper[4854]: I1125 11:15:44.014063 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272"
Nov 25 11:15:44 crc kubenswrapper[4854]: E1125 11:15:44.015204 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0"
Nov 25 11:15:44 crc kubenswrapper[4854]: I1125 11:15:44.648943 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:44 crc kubenswrapper[4854]: I1125 11:15:44.650097 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:44 crc kubenswrapper[4854]: I1125 11:15:44.711483 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:45 crc kubenswrapper[4854]: I1125 11:15:45.084615 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:45 crc kubenswrapper[4854]: I1125 11:15:45.150699 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tkbkp"]
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.061416 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tkbkp" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="registry-server" containerID="cri-o://61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021" gracePeriod=2
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.417874 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bbqnd"]
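
The "Killing container with a grace period" entry above (gracePeriod=2) means the runtime signals the registry-server to exit and only force-kills it if the two seconds elapse. A minimal sketch of that contract using an ordinary child process (illustrative only, not CRI-O's implementation):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM) // polite request to exit

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case <-done:
		fmt.Println("process exited within the grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace period elapsed: SIGKILL
		<-done
		fmt.Println("process force-killed after the grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second)
}
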
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.423861 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.473042 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbqnd"]
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.535840 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-catalog-content\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.536021 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvpd2\" (UniqueName: \"kubernetes.io/projected/528249c7-b26d-4ef1-9963-70e1dd171b3a-kube-api-access-rvpd2\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.536875 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-utilities\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.639244 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-catalog-content\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.639343 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvpd2\" (UniqueName: \"kubernetes.io/projected/528249c7-b26d-4ef1-9963-70e1dd171b3a-kube-api-access-rvpd2\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.639442 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-utilities\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.640266 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-catalog-content\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.640319 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-utilities\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.677867 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tkbkp"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.778469 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvpd2\" (UniqueName: \"kubernetes.io/projected/528249c7-b26d-4ef1-9963-70e1dd171b3a-kube-api-access-rvpd2\") pod \"redhat-marketplace-bbqnd\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.844443 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lphr6\" (UniqueName: \"kubernetes.io/projected/dacf4cba-0662-4f35-a33b-6802b143d3b7-kube-api-access-lphr6\") pod \"dacf4cba-0662-4f35-a33b-6802b143d3b7\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") "
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.844744 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-catalog-content\") pod \"dacf4cba-0662-4f35-a33b-6802b143d3b7\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") "
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.844907 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-utilities\") pod \"dacf4cba-0662-4f35-a33b-6802b143d3b7\" (UID: \"dacf4cba-0662-4f35-a33b-6802b143d3b7\") "
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.846012 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-utilities" (OuterVolumeSpecName: "utilities") pod "dacf4cba-0662-4f35-a33b-6802b143d3b7" (UID: "dacf4cba-0662-4f35-a33b-6802b143d3b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.852452 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dacf4cba-0662-4f35-a33b-6802b143d3b7-kube-api-access-lphr6" (OuterVolumeSpecName: "kube-api-access-lphr6") pod "dacf4cba-0662-4f35-a33b-6802b143d3b7" (UID: "dacf4cba-0662-4f35-a33b-6802b143d3b7"). InnerVolumeSpecName "kube-api-access-lphr6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.892906 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dacf4cba-0662-4f35-a33b-6802b143d3b7" (UID: "dacf4cba-0662-4f35-a33b-6802b143d3b7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.948908 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.948953 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lphr6\" (UniqueName: \"kubernetes.io/projected/dacf4cba-0662-4f35-a33b-6802b143d3b7-kube-api-access-lphr6\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:47 crc kubenswrapper[4854]: I1125 11:15:47.948968 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dacf4cba-0662-4f35-a33b-6802b143d3b7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.071182 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbqnd"
Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.081825 4854 generic.go:334] "Generic (PLEG): container finished" podID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerID="61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021" exitCode=0
Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.081878 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkbkp" event={"ID":"dacf4cba-0662-4f35-a33b-6802b143d3b7","Type":"ContainerDied","Data":"61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021"}
Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.081911 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkbkp" event={"ID":"dacf4cba-0662-4f35-a33b-6802b143d3b7","Type":"ContainerDied","Data":"90514a1450e604e82bd96c9798fad4a2884946f70faead7650ca0704c530b6f1"}
Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.081932 4854 scope.go:117] "RemoveContainer" containerID="61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021"
Need to start a new one" pod="openshift-marketplace/certified-operators-tkbkp" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.113305 4854 scope.go:117] "RemoveContainer" containerID="4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.137808 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tkbkp"] Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.149974 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tkbkp"] Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.152026 4854 scope.go:117] "RemoveContainer" containerID="9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.184593 4854 scope.go:117] "RemoveContainer" containerID="61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021" Nov 25 11:15:48 crc kubenswrapper[4854]: E1125 11:15:48.185326 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021\": container with ID starting with 61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021 not found: ID does not exist" containerID="61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.185374 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021"} err="failed to get container status \"61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021\": rpc error: code = NotFound desc = could not find container \"61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021\": container with ID starting with 61e6b6ef535635f954ad448fb68ac962b6fa27ea046bd698d29faaf2bc887021 not found: ID does not exist" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.185408 4854 scope.go:117] "RemoveContainer" containerID="4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4" Nov 25 11:15:48 crc kubenswrapper[4854]: E1125 11:15:48.186233 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4\": container with ID starting with 4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4 not found: ID does not exist" containerID="4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.186269 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4"} err="failed to get container status \"4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4\": rpc error: code = NotFound desc = could not find container \"4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4\": container with ID starting with 4adc926b0122d893b6274b0db668c85993a69fd95e46e9ea3cdf2840f8ed8bc4 not found: ID does not exist" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.186294 4854 scope.go:117] "RemoveContainer" containerID="9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1" Nov 25 11:15:48 crc kubenswrapper[4854]: E1125 11:15:48.186648 4854 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1\": container with ID starting with 9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1 not found: ID does not exist" containerID="9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.186693 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1"} err="failed to get container status \"9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1\": rpc error: code = NotFound desc = could not find container \"9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1\": container with ID starting with 9a824dedb6545da144200fab04b7ef919e242f1a7dac847ff38d8b43161146e1 not found: ID does not exist" Nov 25 11:15:48 crc kubenswrapper[4854]: I1125 11:15:48.675995 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbqnd"] Nov 25 11:15:49 crc kubenswrapper[4854]: I1125 11:15:49.035640 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" path="/var/lib/kubelet/pods/dacf4cba-0662-4f35-a33b-6802b143d3b7/volumes" Nov 25 11:15:49 crc kubenswrapper[4854]: I1125 11:15:49.095961 4854 generic.go:334] "Generic (PLEG): container finished" podID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerID="9ab86c3b583743a5b5128f979919cd3c1d6afd0bca65d4966b9953aceaff7a28" exitCode=0 Nov 25 11:15:49 crc kubenswrapper[4854]: I1125 11:15:49.096020 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbqnd" event={"ID":"528249c7-b26d-4ef1-9963-70e1dd171b3a","Type":"ContainerDied","Data":"9ab86c3b583743a5b5128f979919cd3c1d6afd0bca65d4966b9953aceaff7a28"} Nov 25 11:15:49 crc kubenswrapper[4854]: I1125 11:15:49.096054 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbqnd" event={"ID":"528249c7-b26d-4ef1-9963-70e1dd171b3a","Type":"ContainerStarted","Data":"98c4e50b3a1109ad67184d3be4e78ea0a17dff7947692b90ce6ae5eeaff0eed7"} Nov 25 11:15:51 crc kubenswrapper[4854]: I1125 11:15:51.119996 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbqnd" event={"ID":"528249c7-b26d-4ef1-9963-70e1dd171b3a","Type":"ContainerStarted","Data":"69fa26231d4b979b50abeb9e6da9a46353e547983f71f4d34ff065abfc57ad31"} Nov 25 11:15:52 crc kubenswrapper[4854]: I1125 11:15:52.134870 4854 generic.go:334] "Generic (PLEG): container finished" podID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerID="69fa26231d4b979b50abeb9e6da9a46353e547983f71f4d34ff065abfc57ad31" exitCode=0 Nov 25 11:15:52 crc kubenswrapper[4854]: I1125 11:15:52.134924 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbqnd" event={"ID":"528249c7-b26d-4ef1-9963-70e1dd171b3a","Type":"ContainerDied","Data":"69fa26231d4b979b50abeb9e6da9a46353e547983f71f4d34ff065abfc57ad31"} Nov 25 11:15:54 crc kubenswrapper[4854]: I1125 11:15:54.161647 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbqnd" event={"ID":"528249c7-b26d-4ef1-9963-70e1dd171b3a","Type":"ContainerStarted","Data":"0b3db43c4163c8bb6665aedbac719f147fee3500672390ac9caba3c5aa1ba440"} Nov 25 11:15:54 crc kubenswrapper[4854]: I1125 11:15:54.194193 4854 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bbqnd" podStartSLOduration=3.64945424 podStartE2EDuration="7.194168951s" podCreationTimestamp="2025-11-25 11:15:47 +0000 UTC" firstStartedPulling="2025-11-25 11:15:49.099469855 +0000 UTC m=+5954.952463231" lastFinishedPulling="2025-11-25 11:15:52.644184566 +0000 UTC m=+5958.497177942" observedRunningTime="2025-11-25 11:15:54.190445698 +0000 UTC m=+5960.043439104" watchObservedRunningTime="2025-11-25 11:15:54.194168951 +0000 UTC m=+5960.047162327" Nov 25 11:15:57 crc kubenswrapper[4854]: I1125 11:15:57.013444 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:15:57 crc kubenswrapper[4854]: E1125 11:15:57.014081 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:15:58 crc kubenswrapper[4854]: I1125 11:15:58.072299 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bbqnd" Nov 25 11:15:58 crc kubenswrapper[4854]: I1125 11:15:58.072783 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bbqnd" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.030460 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g9vt4"] Nov 25 11:15:59 crc kubenswrapper[4854]: E1125 11:15:59.031067 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="extract-content" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.031081 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="extract-content" Nov 25 11:15:59 crc kubenswrapper[4854]: E1125 11:15:59.031097 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="registry-server" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.031103 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="registry-server" Nov 25 11:15:59 crc kubenswrapper[4854]: E1125 11:15:59.031115 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="extract-utilities" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.031121 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="extract-utilities" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.031374 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="dacf4cba-0662-4f35-a33b-6802b143d3b7" containerName="registry-server" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.033154 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.037282 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g9vt4"] Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.086785 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-catalog-content\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.086908 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd8lk\" (UniqueName: \"kubernetes.io/projected/ed196b98-663b-407a-a85a-6b845650f06c-kube-api-access-fd8lk\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.086968 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-utilities\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.134447 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-bbqnd" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:15:59 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:15:59 crc kubenswrapper[4854]: > Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.189935 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-catalog-content\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.190096 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd8lk\" (UniqueName: \"kubernetes.io/projected/ed196b98-663b-407a-a85a-6b845650f06c-kube-api-access-fd8lk\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.190138 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-utilities\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.190515 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-catalog-content\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 
11:15:59.190770 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-utilities\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.229134 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd8lk\" (UniqueName: \"kubernetes.io/projected/ed196b98-663b-407a-a85a-6b845650f06c-kube-api-access-fd8lk\") pod \"community-operators-g9vt4\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:15:59 crc kubenswrapper[4854]: I1125 11:15:59.361942 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:16:00 crc kubenswrapper[4854]: I1125 11:16:00.026210 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g9vt4"] Nov 25 11:16:00 crc kubenswrapper[4854]: I1125 11:16:00.252079 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9vt4" event={"ID":"ed196b98-663b-407a-a85a-6b845650f06c","Type":"ContainerStarted","Data":"0f493319ad7da6f3372e50ae21b89714bd43b9c76a14ae80930c442ec7b9a1b8"} Nov 25 11:16:00 crc kubenswrapper[4854]: I1125 11:16:00.374399 4854 scope.go:117] "RemoveContainer" containerID="a38455385db4471897b8726eb92b373fd7a92d267ac57404943c1637475aa0d6" Nov 25 11:16:01 crc kubenswrapper[4854]: I1125 11:16:01.265894 4854 generic.go:334] "Generic (PLEG): container finished" podID="ed196b98-663b-407a-a85a-6b845650f06c" containerID="61f67cb8b646e7cd2970036bafea844e3fa60916c013b273a1e72fa065c40912" exitCode=0 Nov 25 11:16:01 crc kubenswrapper[4854]: I1125 11:16:01.265935 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9vt4" event={"ID":"ed196b98-663b-407a-a85a-6b845650f06c","Type":"ContainerDied","Data":"61f67cb8b646e7cd2970036bafea844e3fa60916c013b273a1e72fa065c40912"} Nov 25 11:16:03 crc kubenswrapper[4854]: I1125 11:16:03.295004 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9vt4" event={"ID":"ed196b98-663b-407a-a85a-6b845650f06c","Type":"ContainerStarted","Data":"f01799778c9a5b083d8e92402edd9150e9cbe590d1692d9c3b5e08b355d1f638"} Nov 25 11:16:05 crc kubenswrapper[4854]: I1125 11:16:05.333182 4854 generic.go:334] "Generic (PLEG): container finished" podID="ed196b98-663b-407a-a85a-6b845650f06c" containerID="f01799778c9a5b083d8e92402edd9150e9cbe590d1692d9c3b5e08b355d1f638" exitCode=0 Nov 25 11:16:05 crc kubenswrapper[4854]: I1125 11:16:05.333247 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9vt4" event={"ID":"ed196b98-663b-407a-a85a-6b845650f06c","Type":"ContainerDied","Data":"f01799778c9a5b083d8e92402edd9150e9cbe590d1692d9c3b5e08b355d1f638"} Nov 25 11:16:07 crc kubenswrapper[4854]: I1125 11:16:07.362610 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9vt4" event={"ID":"ed196b98-663b-407a-a85a-6b845650f06c","Type":"ContainerStarted","Data":"b005731bd5c51690457a0dd7947f5703d98f6f4d96a3b6a12708888d0e88d65d"} Nov 25 11:16:07 crc kubenswrapper[4854]: I1125 11:16:07.386403 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-g9vt4" podStartSLOduration=4.417113186 podStartE2EDuration="9.386385311s" podCreationTimestamp="2025-11-25 11:15:58 +0000 UTC" firstStartedPulling="2025-11-25 11:16:01.268083576 +0000 UTC m=+5967.121076952" lastFinishedPulling="2025-11-25 11:16:06.237355711 +0000 UTC m=+5972.090349077" observedRunningTime="2025-11-25 11:16:07.383649807 +0000 UTC m=+5973.236643203" watchObservedRunningTime="2025-11-25 11:16:07.386385311 +0000 UTC m=+5973.239378677" Nov 25 11:16:08 crc kubenswrapper[4854]: I1125 11:16:08.123453 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bbqnd" Nov 25 11:16:08 crc kubenswrapper[4854]: I1125 11:16:08.198814 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bbqnd" Nov 25 11:16:08 crc kubenswrapper[4854]: I1125 11:16:08.603931 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbqnd"] Nov 25 11:16:09 crc kubenswrapper[4854]: I1125 11:16:09.013961 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:16:09 crc kubenswrapper[4854]: E1125 11:16:09.014356 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:16:09 crc kubenswrapper[4854]: I1125 11:16:09.363003 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:16:09 crc kubenswrapper[4854]: I1125 11:16:09.363321 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:16:09 crc kubenswrapper[4854]: I1125 11:16:09.390209 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bbqnd" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="registry-server" containerID="cri-o://0b3db43c4163c8bb6665aedbac719f147fee3500672390ac9caba3c5aa1ba440" gracePeriod=2 Nov 25 11:16:09 crc kubenswrapper[4854]: I1125 11:16:09.422976 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.403953 4854 generic.go:334] "Generic (PLEG): container finished" podID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerID="0b3db43c4163c8bb6665aedbac719f147fee3500672390ac9caba3c5aa1ba440" exitCode=0 Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.404342 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbqnd" event={"ID":"528249c7-b26d-4ef1-9963-70e1dd171b3a","Type":"ContainerDied","Data":"0b3db43c4163c8bb6665aedbac719f147fee3500672390ac9caba3c5aa1ba440"} Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.404399 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbqnd" 
event={"ID":"528249c7-b26d-4ef1-9963-70e1dd171b3a","Type":"ContainerDied","Data":"98c4e50b3a1109ad67184d3be4e78ea0a17dff7947692b90ce6ae5eeaff0eed7"} Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.404415 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98c4e50b3a1109ad67184d3be4e78ea0a17dff7947692b90ce6ae5eeaff0eed7" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.490141 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbqnd" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.575734 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvpd2\" (UniqueName: \"kubernetes.io/projected/528249c7-b26d-4ef1-9963-70e1dd171b3a-kube-api-access-rvpd2\") pod \"528249c7-b26d-4ef1-9963-70e1dd171b3a\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.576162 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-utilities\") pod \"528249c7-b26d-4ef1-9963-70e1dd171b3a\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.576293 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-catalog-content\") pod \"528249c7-b26d-4ef1-9963-70e1dd171b3a\" (UID: \"528249c7-b26d-4ef1-9963-70e1dd171b3a\") " Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.584327 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-utilities" (OuterVolumeSpecName: "utilities") pod "528249c7-b26d-4ef1-9963-70e1dd171b3a" (UID: "528249c7-b26d-4ef1-9963-70e1dd171b3a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.602280 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "528249c7-b26d-4ef1-9963-70e1dd171b3a" (UID: "528249c7-b26d-4ef1-9963-70e1dd171b3a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.631575 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/528249c7-b26d-4ef1-9963-70e1dd171b3a-kube-api-access-rvpd2" (OuterVolumeSpecName: "kube-api-access-rvpd2") pod "528249c7-b26d-4ef1-9963-70e1dd171b3a" (UID: "528249c7-b26d-4ef1-9963-70e1dd171b3a"). InnerVolumeSpecName "kube-api-access-rvpd2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.678886 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.678921 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/528249c7-b26d-4ef1-9963-70e1dd171b3a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:10 crc kubenswrapper[4854]: I1125 11:16:10.678934 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvpd2\" (UniqueName: \"kubernetes.io/projected/528249c7-b26d-4ef1-9963-70e1dd171b3a-kube-api-access-rvpd2\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:11 crc kubenswrapper[4854]: I1125 11:16:11.416448 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbqnd" Nov 25 11:16:11 crc kubenswrapper[4854]: I1125 11:16:11.447538 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbqnd"] Nov 25 11:16:11 crc kubenswrapper[4854]: I1125 11:16:11.465285 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbqnd"] Nov 25 11:16:13 crc kubenswrapper[4854]: I1125 11:16:13.029043 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" path="/var/lib/kubelet/pods/528249c7-b26d-4ef1-9963-70e1dd171b3a/volumes" Nov 25 11:16:19 crc kubenswrapper[4854]: I1125 11:16:19.435124 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:16:19 crc kubenswrapper[4854]: I1125 11:16:19.495909 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g9vt4"] Nov 25 11:16:19 crc kubenswrapper[4854]: I1125 11:16:19.509130 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g9vt4" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="registry-server" containerID="cri-o://b005731bd5c51690457a0dd7947f5703d98f6f4d96a3b6a12708888d0e88d65d" gracePeriod=2 Nov 25 11:16:20 crc kubenswrapper[4854]: I1125 11:16:20.014121 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:16:20 crc kubenswrapper[4854]: E1125 11:16:20.014429 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:16:20 crc kubenswrapper[4854]: I1125 11:16:20.528298 4854 generic.go:334] "Generic (PLEG): container finished" podID="ed196b98-663b-407a-a85a-6b845650f06c" containerID="b005731bd5c51690457a0dd7947f5703d98f6f4d96a3b6a12708888d0e88d65d" exitCode=0 Nov 25 11:16:20 crc kubenswrapper[4854]: I1125 11:16:20.528347 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9vt4" 
event={"ID":"ed196b98-663b-407a-a85a-6b845650f06c","Type":"ContainerDied","Data":"b005731bd5c51690457a0dd7947f5703d98f6f4d96a3b6a12708888d0e88d65d"} Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.170012 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.352266 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fd8lk\" (UniqueName: \"kubernetes.io/projected/ed196b98-663b-407a-a85a-6b845650f06c-kube-api-access-fd8lk\") pod \"ed196b98-663b-407a-a85a-6b845650f06c\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.352425 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-catalog-content\") pod \"ed196b98-663b-407a-a85a-6b845650f06c\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.352800 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-utilities\") pod \"ed196b98-663b-407a-a85a-6b845650f06c\" (UID: \"ed196b98-663b-407a-a85a-6b845650f06c\") " Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.353500 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-utilities" (OuterVolumeSpecName: "utilities") pod "ed196b98-663b-407a-a85a-6b845650f06c" (UID: "ed196b98-663b-407a-a85a-6b845650f06c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.354271 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.359774 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed196b98-663b-407a-a85a-6b845650f06c-kube-api-access-fd8lk" (OuterVolumeSpecName: "kube-api-access-fd8lk") pod "ed196b98-663b-407a-a85a-6b845650f06c" (UID: "ed196b98-663b-407a-a85a-6b845650f06c"). InnerVolumeSpecName "kube-api-access-fd8lk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.430320 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed196b98-663b-407a-a85a-6b845650f06c" (UID: "ed196b98-663b-407a-a85a-6b845650f06c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.457349 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fd8lk\" (UniqueName: \"kubernetes.io/projected/ed196b98-663b-407a-a85a-6b845650f06c-kube-api-access-fd8lk\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.457398 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed196b98-663b-407a-a85a-6b845650f06c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.541474 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g9vt4" event={"ID":"ed196b98-663b-407a-a85a-6b845650f06c","Type":"ContainerDied","Data":"0f493319ad7da6f3372e50ae21b89714bd43b9c76a14ae80930c442ec7b9a1b8"} Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.541535 4854 scope.go:117] "RemoveContainer" containerID="b005731bd5c51690457a0dd7947f5703d98f6f4d96a3b6a12708888d0e88d65d" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.541537 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g9vt4" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.565114 4854 scope.go:117] "RemoveContainer" containerID="f01799778c9a5b083d8e92402edd9150e9cbe590d1692d9c3b5e08b355d1f638" Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.582991 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g9vt4"] Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.596080 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g9vt4"] Nov 25 11:16:21 crc kubenswrapper[4854]: I1125 11:16:21.610408 4854 scope.go:117] "RemoveContainer" containerID="61f67cb8b646e7cd2970036bafea844e3fa60916c013b273a1e72fa065c40912" Nov 25 11:16:23 crc kubenswrapper[4854]: I1125 11:16:23.030849 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed196b98-663b-407a-a85a-6b845650f06c" path="/var/lib/kubelet/pods/ed196b98-663b-407a-a85a-6b845650f06c/volumes" Nov 25 11:16:35 crc kubenswrapper[4854]: I1125 11:16:35.021971 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:16:35 crc kubenswrapper[4854]: E1125 11:16:35.023310 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:16:47 crc kubenswrapper[4854]: I1125 11:16:47.014266 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:16:47 crc kubenswrapper[4854]: E1125 11:16:47.015060 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:17:01 crc kubenswrapper[4854]: I1125 11:17:01.013504 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:17:01 crc kubenswrapper[4854]: E1125 11:17:01.014552 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:17:12 crc kubenswrapper[4854]: I1125 11:17:12.013786 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:17:12 crc kubenswrapper[4854]: E1125 11:17:12.014571 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:17:24 crc kubenswrapper[4854]: I1125 11:17:24.022218 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:17:24 crc kubenswrapper[4854]: E1125 11:17:24.023123 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:17:39 crc kubenswrapper[4854]: I1125 11:17:39.014989 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:17:39 crc kubenswrapper[4854]: E1125 11:17:39.015761 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:17:50 crc kubenswrapper[4854]: I1125 11:17:50.014698 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:17:50 crc kubenswrapper[4854]: E1125 11:17:50.015922 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:18:03 crc kubenswrapper[4854]: I1125 11:18:03.014006 4854 
scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:18:03 crc kubenswrapper[4854]: E1125 11:18:03.014997 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:18:18 crc kubenswrapper[4854]: I1125 11:18:18.014053 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:18:18 crc kubenswrapper[4854]: E1125 11:18:18.014840 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:18:32 crc kubenswrapper[4854]: I1125 11:18:32.014067 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:18:32 crc kubenswrapper[4854]: E1125 11:18:32.015205 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:18:44 crc kubenswrapper[4854]: I1125 11:18:44.013126 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:18:44 crc kubenswrapper[4854]: E1125 11:18:44.013987 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:18:59 crc kubenswrapper[4854]: I1125 11:18:59.014154 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:19:00 crc kubenswrapper[4854]: I1125 11:19:00.370880 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"2c8514cc6c40a71fc410000bbe6ceecaa6fa777b7603a916034922fed06f04fc"} Nov 25 11:19:30 crc kubenswrapper[4854]: I1125 11:19:30.736576 4854 generic.go:334] "Generic (PLEG): container finished" podID="2323d0fa-ad38-4041-b209-029ace425aa7" containerID="7c823a2c387f1ff53e26d01629f5717ad39f63d52b5c0f205b4448265cc87453" exitCode=0 Nov 25 11:19:30 crc kubenswrapper[4854]: I1125 11:19:30.736782 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" 
event={"ID":"2323d0fa-ad38-4041-b209-029ace425aa7","Type":"ContainerDied","Data":"7c823a2c387f1ff53e26d01629f5717ad39f63d52b5c0f205b4448265cc87453"} Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.205389 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.327797 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-workdir\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.327884 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ca-certs\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.327923 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.327961 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ssh-key\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.327982 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct64n\" (UniqueName: \"kubernetes.io/projected/2323d0fa-ad38-4041-b209-029ace425aa7-kube-api-access-ct64n\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.328035 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-temporary\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.328076 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-config-data\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.328161 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: \"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.328224 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config-secret\") pod \"2323d0fa-ad38-4041-b209-029ace425aa7\" (UID: 
\"2323d0fa-ad38-4041-b209-029ace425aa7\") " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.328840 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.335181 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2323d0fa-ad38-4041-b209-029ace425aa7-kube-api-access-ct64n" (OuterVolumeSpecName: "kube-api-access-ct64n") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "kube-api-access-ct64n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.337737 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-config-data" (OuterVolumeSpecName: "config-data") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.338041 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.338594 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "test-operator-logs") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.375730 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.380355 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "ca-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.389808 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.407257 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "2323d0fa-ad38-4041-b209-029ace425aa7" (UID: "2323d0fa-ad38-4041-b209-029ace425aa7"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441863 4854 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441904 4854 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441917 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct64n\" (UniqueName: \"kubernetes.io/projected/2323d0fa-ad38-4041-b209-029ace425aa7-kube-api-access-ct64n\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441929 4854 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441940 4854 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441957 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441970 4854 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441982 4854 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/2323d0fa-ad38-4041-b209-029ace425aa7-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.441993 4854 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/2323d0fa-ad38-4041-b209-029ace425aa7-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.476977 4854 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: 
"kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.544291 4854 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.760742 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"2323d0fa-ad38-4041-b209-029ace425aa7","Type":"ContainerDied","Data":"0fdf31fcd304d4093a74173d7a51cc83c462e369ed469efe5e661a5830eae504"} Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.760783 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0fdf31fcd304d4093a74173d7a51cc83c462e369ed469efe5e661a5830eae504" Nov 25 11:19:32 crc kubenswrapper[4854]: I1125 11:19:32.761305 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.530163 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 11:19:34 crc kubenswrapper[4854]: E1125 11:19:34.531500 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="extract-utilities" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.531523 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="extract-utilities" Nov 25 11:19:34 crc kubenswrapper[4854]: E1125 11:19:34.531536 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="extract-content" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.531542 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="extract-content" Nov 25 11:19:34 crc kubenswrapper[4854]: E1125 11:19:34.531582 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2323d0fa-ad38-4041-b209-029ace425aa7" containerName="tempest-tests-tempest-tests-runner" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.531589 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="2323d0fa-ad38-4041-b209-029ace425aa7" containerName="tempest-tests-tempest-tests-runner" Nov 25 11:19:34 crc kubenswrapper[4854]: E1125 11:19:34.531603 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="extract-utilities" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.531609 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="extract-utilities" Nov 25 11:19:34 crc kubenswrapper[4854]: E1125 11:19:34.531626 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="registry-server" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.531632 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="registry-server" Nov 25 11:19:34 crc kubenswrapper[4854]: E1125 11:19:34.531653 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="registry-server" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.531658 4854 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="registry-server" Nov 25 11:19:34 crc kubenswrapper[4854]: E1125 11:19:34.531709 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="extract-content" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.531717 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="extract-content" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.532006 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="2323d0fa-ad38-4041-b209-029ace425aa7" containerName="tempest-tests-tempest-tests-runner" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.532022 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed196b98-663b-407a-a85a-6b845650f06c" containerName="registry-server" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.532049 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="528249c7-b26d-4ef1-9963-70e1dd171b3a" containerName="registry-server" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.533432 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.537813 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-s5gn4" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.543639 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.600558 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2610fa25-8ff3-403a-aea8-408f28a4fb9a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.600654 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7zr4\" (UniqueName: \"kubernetes.io/projected/2610fa25-8ff3-403a-aea8-408f28a4fb9a-kube-api-access-k7zr4\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2610fa25-8ff3-403a-aea8-408f28a4fb9a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.704465 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2610fa25-8ff3-403a-aea8-408f28a4fb9a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.704578 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7zr4\" (UniqueName: \"kubernetes.io/projected/2610fa25-8ff3-403a-aea8-408f28a4fb9a-kube-api-access-k7zr4\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2610fa25-8ff3-403a-aea8-408f28a4fb9a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.705479 4854 operation_generator.go:580] "MountVolume.MountDevice 
succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2610fa25-8ff3-403a-aea8-408f28a4fb9a\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.732892 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7zr4\" (UniqueName: \"kubernetes.io/projected/2610fa25-8ff3-403a-aea8-408f28a4fb9a-kube-api-access-k7zr4\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2610fa25-8ff3-403a-aea8-408f28a4fb9a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.744185 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2610fa25-8ff3-403a-aea8-408f28a4fb9a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:34 crc kubenswrapper[4854]: I1125 11:19:34.870989 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:19:35 crc kubenswrapper[4854]: I1125 11:19:35.385706 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 11:19:35 crc kubenswrapper[4854]: I1125 11:19:35.802937 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2610fa25-8ff3-403a-aea8-408f28a4fb9a","Type":"ContainerStarted","Data":"69fee0b4ba2befea31b5004fc8aad1f492b376dc9b167d1ef861aaf6cd8f5f4e"} Nov 25 11:19:40 crc kubenswrapper[4854]: I1125 11:19:40.858504 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2610fa25-8ff3-403a-aea8-408f28a4fb9a","Type":"ContainerStarted","Data":"e8825ecfb93fe451bc021c46609b9523c59cbeccf4468a679fb05169b9412d66"} Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.691049 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=62.383909773 podStartE2EDuration="1m6.691023993s" podCreationTimestamp="2025-11-25 11:19:34 +0000 UTC" firstStartedPulling="2025-11-25 11:19:35.421079723 +0000 UTC m=+6181.274073119" lastFinishedPulling="2025-11-25 11:19:39.728193963 +0000 UTC m=+6185.581187339" observedRunningTime="2025-11-25 11:19:40.876107192 +0000 UTC m=+6186.729100568" watchObservedRunningTime="2025-11-25 11:20:40.691023993 +0000 UTC m=+6246.544017369" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.700540 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sm8s5/must-gather-5468f"] Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.706802 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.717282 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sm8s5/must-gather-5468f"] Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.718345 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sm8s5"/"openshift-service-ca.crt" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.718857 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-sm8s5"/"default-dockercfg-jlgd2" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.729442 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sm8s5"/"kube-root-ca.crt" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.873667 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/afbe3ed1-5005-490f-a465-9a711e8fa24e-must-gather-output\") pod \"must-gather-5468f\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.873790 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clf8j\" (UniqueName: \"kubernetes.io/projected/afbe3ed1-5005-490f-a465-9a711e8fa24e-kube-api-access-clf8j\") pod \"must-gather-5468f\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.976688 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/afbe3ed1-5005-490f-a465-9a711e8fa24e-must-gather-output\") pod \"must-gather-5468f\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.976829 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clf8j\" (UniqueName: \"kubernetes.io/projected/afbe3ed1-5005-490f-a465-9a711e8fa24e-kube-api-access-clf8j\") pod \"must-gather-5468f\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:40 crc kubenswrapper[4854]: I1125 11:20:40.977772 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/afbe3ed1-5005-490f-a465-9a711e8fa24e-must-gather-output\") pod \"must-gather-5468f\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:41 crc kubenswrapper[4854]: I1125 11:20:41.001109 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clf8j\" (UniqueName: \"kubernetes.io/projected/afbe3ed1-5005-490f-a465-9a711e8fa24e-kube-api-access-clf8j\") pod \"must-gather-5468f\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:41 crc kubenswrapper[4854]: I1125 11:20:41.032499 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:20:41 crc kubenswrapper[4854]: I1125 11:20:41.621547 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sm8s5/must-gather-5468f"] Nov 25 11:20:41 crc kubenswrapper[4854]: I1125 11:20:41.642112 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:20:41 crc kubenswrapper[4854]: I1125 11:20:41.667903 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/must-gather-5468f" event={"ID":"afbe3ed1-5005-490f-a465-9a711e8fa24e","Type":"ContainerStarted","Data":"f0a2071e8610c4284351c8d6d96ed27a7ed5ff2f9021fd591909d3c2843122e3"} Nov 25 11:20:52 crc kubenswrapper[4854]: I1125 11:20:52.819929 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/must-gather-5468f" event={"ID":"afbe3ed1-5005-490f-a465-9a711e8fa24e","Type":"ContainerStarted","Data":"ec9f8de1c095957e6fa5e8b51b16c8c87790218f976c18170ed8ec55a7119ffa"} Nov 25 11:20:52 crc kubenswrapper[4854]: I1125 11:20:52.820461 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/must-gather-5468f" event={"ID":"afbe3ed1-5005-490f-a465-9a711e8fa24e","Type":"ContainerStarted","Data":"a013b66ee810706a53bd3909c1fcde5809bf827d72b073743e6d84143ec09036"} Nov 25 11:20:52 crc kubenswrapper[4854]: I1125 11:20:52.835093 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sm8s5/must-gather-5468f" podStartSLOduration=2.985841344 podStartE2EDuration="12.835073814s" podCreationTimestamp="2025-11-25 11:20:40 +0000 UTC" firstStartedPulling="2025-11-25 11:20:41.641902635 +0000 UTC m=+6247.494896011" lastFinishedPulling="2025-11-25 11:20:51.491135105 +0000 UTC m=+6257.344128481" observedRunningTime="2025-11-25 11:20:52.834088078 +0000 UTC m=+6258.687081454" watchObservedRunningTime="2025-11-25 11:20:52.835073814 +0000 UTC m=+6258.688067190" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.339725 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-chbzn"] Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.342316 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.489419 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd717731-6493-4479-a9bf-e0c93d9f9032-host\") pod \"crc-debug-chbzn\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.490229 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2rz4\" (UniqueName: \"kubernetes.io/projected/fd717731-6493-4479-a9bf-e0c93d9f9032-kube-api-access-n2rz4\") pod \"crc-debug-chbzn\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.592121 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2rz4\" (UniqueName: \"kubernetes.io/projected/fd717731-6493-4479-a9bf-e0c93d9f9032-kube-api-access-n2rz4\") pod \"crc-debug-chbzn\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.592242 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd717731-6493-4479-a9bf-e0c93d9f9032-host\") pod \"crc-debug-chbzn\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.593261 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd717731-6493-4479-a9bf-e0c93d9f9032-host\") pod \"crc-debug-chbzn\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.616777 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2rz4\" (UniqueName: \"kubernetes.io/projected/fd717731-6493-4479-a9bf-e0c93d9f9032-kube-api-access-n2rz4\") pod \"crc-debug-chbzn\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.664153 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:21:01 crc kubenswrapper[4854]: I1125 11:21:01.964492 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" event={"ID":"fd717731-6493-4479-a9bf-e0c93d9f9032","Type":"ContainerStarted","Data":"0d46adf571fefd6083ccc53617e3388bd4f76fbffd2714f58c307ce04527a4d2"} Nov 25 11:21:03 crc kubenswrapper[4854]: E1125 11:21:03.927153 4854 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.184:36286->38.102.83.184:43333: write tcp 38.102.83.184:36286->38.102.83.184:43333: write: broken pipe Nov 25 11:21:18 crc kubenswrapper[4854]: E1125 11:21:18.499422 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296" Nov 25 11:21:18 crc kubenswrapper[4854]: E1125 11:21:18.506781 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-00,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296,Command:[chroot /host bash -c echo 'TOOLBOX_NAME=toolbox-osp' > /root/.toolboxrc ; rm -rf \"/var/tmp/sos-osp\" && mkdir -p \"/var/tmp/sos-osp\" && sudo podman rm --force toolbox-osp; sudo --preserve-env podman pull --authfile /var/lib/kubelet/config.json registry.redhat.io/rhel9/support-tools && toolbox sos report --batch --all-logs --only-plugins block,cifs,crio,devicemapper,devices,firewall_tables,firewalld,iscsi,lvm2,memory,multipath,nfs,nis,nvme,podman,process,processor,selinux,scsi,udev,logs,crypto --tmp-dir=\"/var/tmp/sos-osp\" && if [[ \"$(ls /var/log/pods/*/{*.log.*,*/*.log.*} 2>/dev/null)\" != '' ]]; then tar --ignore-failed-read --warning=no-file-changed -cJf \"/var/tmp/sos-osp/podlogs.tar.xz\" --transform 's,^,podlogs/,' /var/log/pods/*/{*.log.*,*/*.log.*} || true; fi],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:TMOUT,Value:900,ValueFrom:nil,},EnvVar{Name:HOST,Value:/host,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host,ReadOnly:false,MountPath:/host,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n2rz4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod crc-debug-chbzn_openshift-must-gather-sm8s5(fd717731-6493-4479-a9bf-e0c93d9f9032): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 11:21:18 crc kubenswrapper[4854]: E1125 11:21:18.508025 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"container-00\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" podUID="fd717731-6493-4479-a9bf-e0c93d9f9032" Nov 25 11:21:19 crc kubenswrapper[4854]: E1125 11:21:19.239120 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296\\\"\"" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" podUID="fd717731-6493-4479-a9bf-e0c93d9f9032" Nov 25 11:21:25 crc kubenswrapper[4854]: I1125 11:21:25.028998 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:21:25 crc kubenswrapper[4854]: I1125 11:21:25.029563 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:21:32 crc kubenswrapper[4854]: I1125 11:21:32.381349 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" event={"ID":"fd717731-6493-4479-a9bf-e0c93d9f9032","Type":"ContainerStarted","Data":"1278e0a139c7aebd5a0bdd140bfb2204e1ba72c288c321435ae47396d8618a1e"} Nov 25 11:21:32 crc kubenswrapper[4854]: I1125 11:21:32.403855 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" podStartSLOduration=1.613269909 podStartE2EDuration="31.403830781s" podCreationTimestamp="2025-11-25 11:21:01 +0000 UTC" firstStartedPulling="2025-11-25 11:21:01.733525423 +0000 UTC m=+6267.586518799" lastFinishedPulling="2025-11-25 11:21:31.524086295 +0000 UTC m=+6297.377079671" observedRunningTime="2025-11-25 11:21:32.394276741 +0000 UTC m=+6298.247270117" watchObservedRunningTime="2025-11-25 11:21:32.403830781 +0000 UTC m=+6298.256824157" Nov 25 11:21:55 crc kubenswrapper[4854]: I1125 11:21:55.029503 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:21:55 crc kubenswrapper[4854]: I1125 11:21:55.030118 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:22:00 crc kubenswrapper[4854]: I1125 11:22:00.683843 4854 scope.go:117] "RemoveContainer" containerID="0b3db43c4163c8bb6665aedbac719f147fee3500672390ac9caba3c5aa1ba440" Nov 25 11:22:00 crc kubenswrapper[4854]: I1125 11:22:00.715473 4854 scope.go:117] "RemoveContainer" containerID="9ab86c3b583743a5b5128f979919cd3c1d6afd0bca65d4966b9953aceaff7a28" Nov 25 11:22:00 crc kubenswrapper[4854]: I1125 11:22:00.767691 4854 
scope.go:117] "RemoveContainer" containerID="69fa26231d4b979b50abeb9e6da9a46353e547983f71f4d34ff065abfc57ad31" Nov 25 11:22:25 crc kubenswrapper[4854]: I1125 11:22:25.029274 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:22:25 crc kubenswrapper[4854]: I1125 11:22:25.031525 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:22:25 crc kubenswrapper[4854]: I1125 11:22:25.031809 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 11:22:25 crc kubenswrapper[4854]: I1125 11:22:25.033660 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2c8514cc6c40a71fc410000bbe6ceecaa6fa777b7603a916034922fed06f04fc"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:22:25 crc kubenswrapper[4854]: I1125 11:22:25.034109 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://2c8514cc6c40a71fc410000bbe6ceecaa6fa777b7603a916034922fed06f04fc" gracePeriod=600 Nov 25 11:22:26 crc kubenswrapper[4854]: I1125 11:22:26.094201 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="2c8514cc6c40a71fc410000bbe6ceecaa6fa777b7603a916034922fed06f04fc" exitCode=0 Nov 25 11:22:26 crc kubenswrapper[4854]: I1125 11:22:26.094269 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"2c8514cc6c40a71fc410000bbe6ceecaa6fa777b7603a916034922fed06f04fc"} Nov 25 11:22:26 crc kubenswrapper[4854]: I1125 11:22:26.094556 4854 scope.go:117] "RemoveContainer" containerID="f13c3fdaf7261b1a24dcc71b8b219a5a1847806fac4c3d9c9b94540b0bcd3272" Nov 25 11:22:27 crc kubenswrapper[4854]: I1125 11:22:27.112112 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909"} Nov 25 11:22:47 crc kubenswrapper[4854]: I1125 11:22:47.345990 4854 generic.go:334] "Generic (PLEG): container finished" podID="fd717731-6493-4479-a9bf-e0c93d9f9032" containerID="1278e0a139c7aebd5a0bdd140bfb2204e1ba72c288c321435ae47396d8618a1e" exitCode=0 Nov 25 11:22:47 crc kubenswrapper[4854]: I1125 11:22:47.346043 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" 
event={"ID":"fd717731-6493-4479-a9bf-e0c93d9f9032","Type":"ContainerDied","Data":"1278e0a139c7aebd5a0bdd140bfb2204e1ba72c288c321435ae47396d8618a1e"} Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.482651 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.531230 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-chbzn"] Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.543036 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-chbzn"] Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.597519 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd717731-6493-4479-a9bf-e0c93d9f9032-host\") pod \"fd717731-6493-4479-a9bf-e0c93d9f9032\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.597663 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fd717731-6493-4479-a9bf-e0c93d9f9032-host" (OuterVolumeSpecName: "host") pod "fd717731-6493-4479-a9bf-e0c93d9f9032" (UID: "fd717731-6493-4479-a9bf-e0c93d9f9032"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.597788 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2rz4\" (UniqueName: \"kubernetes.io/projected/fd717731-6493-4479-a9bf-e0c93d9f9032-kube-api-access-n2rz4\") pod \"fd717731-6493-4479-a9bf-e0c93d9f9032\" (UID: \"fd717731-6493-4479-a9bf-e0c93d9f9032\") " Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.598568 4854 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd717731-6493-4479-a9bf-e0c93d9f9032-host\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.605423 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd717731-6493-4479-a9bf-e0c93d9f9032-kube-api-access-n2rz4" (OuterVolumeSpecName: "kube-api-access-n2rz4") pod "fd717731-6493-4479-a9bf-e0c93d9f9032" (UID: "fd717731-6493-4479-a9bf-e0c93d9f9032"). InnerVolumeSpecName "kube-api-access-n2rz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:22:48 crc kubenswrapper[4854]: I1125 11:22:48.701801 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2rz4\" (UniqueName: \"kubernetes.io/projected/fd717731-6493-4479-a9bf-e0c93d9f9032-kube-api-access-n2rz4\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.029287 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd717731-6493-4479-a9bf-e0c93d9f9032" path="/var/lib/kubelet/pods/fd717731-6493-4479-a9bf-e0c93d9f9032/volumes" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.371575 4854 scope.go:117] "RemoveContainer" containerID="1278e0a139c7aebd5a0bdd140bfb2204e1ba72c288c321435ae47396d8618a1e" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.371604 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-chbzn" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.728123 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-8stxm"] Nov 25 11:22:49 crc kubenswrapper[4854]: E1125 11:22:49.728997 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd717731-6493-4479-a9bf-e0c93d9f9032" containerName="container-00" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.729012 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd717731-6493-4479-a9bf-e0c93d9f9032" containerName="container-00" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.729258 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd717731-6493-4479-a9bf-e0c93d9f9032" containerName="container-00" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.730512 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.831558 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/30d7621d-069e-482d-bc87-39807023751f-host\") pod \"crc-debug-8stxm\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.831912 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6sgv\" (UniqueName: \"kubernetes.io/projected/30d7621d-069e-482d-bc87-39807023751f-kube-api-access-s6sgv\") pod \"crc-debug-8stxm\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.933915 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/30d7621d-069e-482d-bc87-39807023751f-host\") pod \"crc-debug-8stxm\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.934068 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6sgv\" (UniqueName: \"kubernetes.io/projected/30d7621d-069e-482d-bc87-39807023751f-kube-api-access-s6sgv\") pod \"crc-debug-8stxm\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.934078 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/30d7621d-069e-482d-bc87-39807023751f-host\") pod \"crc-debug-8stxm\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:49 crc kubenswrapper[4854]: I1125 11:22:49.971557 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6sgv\" (UniqueName: \"kubernetes.io/projected/30d7621d-069e-482d-bc87-39807023751f-kube-api-access-s6sgv\") pod \"crc-debug-8stxm\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:50 crc kubenswrapper[4854]: I1125 11:22:50.049612 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:50 crc kubenswrapper[4854]: I1125 11:22:50.384213 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-8stxm" event={"ID":"30d7621d-069e-482d-bc87-39807023751f","Type":"ContainerStarted","Data":"cca9ae981decd1fe0a1ad67c9e8085c991bb730d6540f1e0906fbd1fd298dbfb"} Nov 25 11:22:51 crc kubenswrapper[4854]: I1125 11:22:51.397475 4854 generic.go:334] "Generic (PLEG): container finished" podID="30d7621d-069e-482d-bc87-39807023751f" containerID="e1520995846f9712e3620f8aa78ebce8cef85ce00f5f8c611ab33c18fdffd6ac" exitCode=0 Nov 25 11:22:51 crc kubenswrapper[4854]: I1125 11:22:51.397734 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-8stxm" event={"ID":"30d7621d-069e-482d-bc87-39807023751f","Type":"ContainerDied","Data":"e1520995846f9712e3620f8aa78ebce8cef85ce00f5f8c611ab33c18fdffd6ac"} Nov 25 11:22:52 crc kubenswrapper[4854]: I1125 11:22:52.544916 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:52 crc kubenswrapper[4854]: I1125 11:22:52.606592 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/30d7621d-069e-482d-bc87-39807023751f-host\") pod \"30d7621d-069e-482d-bc87-39807023751f\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " Nov 25 11:22:52 crc kubenswrapper[4854]: I1125 11:22:52.606661 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/30d7621d-069e-482d-bc87-39807023751f-host" (OuterVolumeSpecName: "host") pod "30d7621d-069e-482d-bc87-39807023751f" (UID: "30d7621d-069e-482d-bc87-39807023751f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:22:52 crc kubenswrapper[4854]: I1125 11:22:52.607146 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6sgv\" (UniqueName: \"kubernetes.io/projected/30d7621d-069e-482d-bc87-39807023751f-kube-api-access-s6sgv\") pod \"30d7621d-069e-482d-bc87-39807023751f\" (UID: \"30d7621d-069e-482d-bc87-39807023751f\") " Nov 25 11:22:52 crc kubenswrapper[4854]: I1125 11:22:52.608396 4854 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/30d7621d-069e-482d-bc87-39807023751f-host\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:52 crc kubenswrapper[4854]: I1125 11:22:52.613818 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30d7621d-069e-482d-bc87-39807023751f-kube-api-access-s6sgv" (OuterVolumeSpecName: "kube-api-access-s6sgv") pod "30d7621d-069e-482d-bc87-39807023751f" (UID: "30d7621d-069e-482d-bc87-39807023751f"). InnerVolumeSpecName "kube-api-access-s6sgv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:22:52 crc kubenswrapper[4854]: I1125 11:22:52.709915 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6sgv\" (UniqueName: \"kubernetes.io/projected/30d7621d-069e-482d-bc87-39807023751f-kube-api-access-s6sgv\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:53 crc kubenswrapper[4854]: I1125 11:22:53.429898 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-8stxm" event={"ID":"30d7621d-069e-482d-bc87-39807023751f","Type":"ContainerDied","Data":"cca9ae981decd1fe0a1ad67c9e8085c991bb730d6540f1e0906fbd1fd298dbfb"} Nov 25 11:22:53 crc kubenswrapper[4854]: I1125 11:22:53.430172 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cca9ae981decd1fe0a1ad67c9e8085c991bb730d6540f1e0906fbd1fd298dbfb" Nov 25 11:22:53 crc kubenswrapper[4854]: I1125 11:22:53.430237 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-8stxm" Nov 25 11:22:53 crc kubenswrapper[4854]: I1125 11:22:53.944220 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-8stxm"] Nov 25 11:22:53 crc kubenswrapper[4854]: I1125 11:22:53.961052 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-8stxm"] Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.030249 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30d7621d-069e-482d-bc87-39807023751f" path="/var/lib/kubelet/pods/30d7621d-069e-482d-bc87-39807023751f/volumes" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.171107 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-m7r2m"] Nov 25 11:22:55 crc kubenswrapper[4854]: E1125 11:22:55.171881 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30d7621d-069e-482d-bc87-39807023751f" containerName="container-00" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.171905 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="30d7621d-069e-482d-bc87-39807023751f" containerName="container-00" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.172218 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="30d7621d-069e-482d-bc87-39807023751f" containerName="container-00" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.173121 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.270939 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqgmf\" (UniqueName: \"kubernetes.io/projected/b8f9fad1-82ea-496c-a732-3463340393c3-kube-api-access-fqgmf\") pod \"crc-debug-m7r2m\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.271363 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f9fad1-82ea-496c-a732-3463340393c3-host\") pod \"crc-debug-m7r2m\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.373344 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f9fad1-82ea-496c-a732-3463340393c3-host\") pod \"crc-debug-m7r2m\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.373443 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqgmf\" (UniqueName: \"kubernetes.io/projected/b8f9fad1-82ea-496c-a732-3463340393c3-kube-api-access-fqgmf\") pod \"crc-debug-m7r2m\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.373474 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f9fad1-82ea-496c-a732-3463340393c3-host\") pod \"crc-debug-m7r2m\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.406529 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqgmf\" (UniqueName: \"kubernetes.io/projected/b8f9fad1-82ea-496c-a732-3463340393c3-kube-api-access-fqgmf\") pod \"crc-debug-m7r2m\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:55 crc kubenswrapper[4854]: I1125 11:22:55.499022 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:56 crc kubenswrapper[4854]: I1125 11:22:56.477824 4854 generic.go:334] "Generic (PLEG): container finished" podID="b8f9fad1-82ea-496c-a732-3463340393c3" containerID="bdae89874c3bbc3fa8ff4abae350fad7c8e5127330e2b3b8abc7c919ad3bd0f9" exitCode=0 Nov 25 11:22:56 crc kubenswrapper[4854]: I1125 11:22:56.477874 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" event={"ID":"b8f9fad1-82ea-496c-a732-3463340393c3","Type":"ContainerDied","Data":"bdae89874c3bbc3fa8ff4abae350fad7c8e5127330e2b3b8abc7c919ad3bd0f9"} Nov 25 11:22:56 crc kubenswrapper[4854]: I1125 11:22:56.478145 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" event={"ID":"b8f9fad1-82ea-496c-a732-3463340393c3","Type":"ContainerStarted","Data":"4ed5183aad693c122c86df9d75051251bb9f99b54f105151419f15b2ad59d7cb"} Nov 25 11:22:56 crc kubenswrapper[4854]: I1125 11:22:56.536580 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-m7r2m"] Nov 25 11:22:56 crc kubenswrapper[4854]: I1125 11:22:56.553394 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sm8s5/crc-debug-m7r2m"] Nov 25 11:22:57 crc kubenswrapper[4854]: I1125 11:22:57.644432 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:57 crc kubenswrapper[4854]: I1125 11:22:57.842988 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqgmf\" (UniqueName: \"kubernetes.io/projected/b8f9fad1-82ea-496c-a732-3463340393c3-kube-api-access-fqgmf\") pod \"b8f9fad1-82ea-496c-a732-3463340393c3\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " Nov 25 11:22:57 crc kubenswrapper[4854]: I1125 11:22:57.843047 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f9fad1-82ea-496c-a732-3463340393c3-host\") pod \"b8f9fad1-82ea-496c-a732-3463340393c3\" (UID: \"b8f9fad1-82ea-496c-a732-3463340393c3\") " Nov 25 11:22:57 crc kubenswrapper[4854]: I1125 11:22:57.843950 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b8f9fad1-82ea-496c-a732-3463340393c3-host" (OuterVolumeSpecName: "host") pod "b8f9fad1-82ea-496c-a732-3463340393c3" (UID: "b8f9fad1-82ea-496c-a732-3463340393c3"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:22:57 crc kubenswrapper[4854]: I1125 11:22:57.855379 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f9fad1-82ea-496c-a732-3463340393c3-kube-api-access-fqgmf" (OuterVolumeSpecName: "kube-api-access-fqgmf") pod "b8f9fad1-82ea-496c-a732-3463340393c3" (UID: "b8f9fad1-82ea-496c-a732-3463340393c3"). InnerVolumeSpecName "kube-api-access-fqgmf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:22:57 crc kubenswrapper[4854]: I1125 11:22:57.946109 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqgmf\" (UniqueName: \"kubernetes.io/projected/b8f9fad1-82ea-496c-a732-3463340393c3-kube-api-access-fqgmf\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:57 crc kubenswrapper[4854]: I1125 11:22:57.946354 4854 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f9fad1-82ea-496c-a732-3463340393c3-host\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:58 crc kubenswrapper[4854]: I1125 11:22:58.501190 4854 scope.go:117] "RemoveContainer" containerID="bdae89874c3bbc3fa8ff4abae350fad7c8e5127330e2b3b8abc7c919ad3bd0f9" Nov 25 11:22:58 crc kubenswrapper[4854]: I1125 11:22:58.501226 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/crc-debug-m7r2m" Nov 25 11:22:59 crc kubenswrapper[4854]: I1125 11:22:59.047696 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8f9fad1-82ea-496c-a732-3463340393c3" path="/var/lib/kubelet/pods/b8f9fad1-82ea-496c-a732-3463340393c3/volumes" Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.821103 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t4wc2"] Nov 25 11:23:07 crc kubenswrapper[4854]: E1125 11:23:07.822105 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f9fad1-82ea-496c-a732-3463340393c3" containerName="container-00" Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.822118 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f9fad1-82ea-496c-a732-3463340393c3" containerName="container-00" Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.822340 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f9fad1-82ea-496c-a732-3463340393c3" containerName="container-00" Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.824420 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.839439 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t4wc2"] Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.912109 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x42tb\" (UniqueName: \"kubernetes.io/projected/cba11762-4763-49ac-9d65-ff8b2d2d665a-kube-api-access-x42tb\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.912243 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-utilities\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:07 crc kubenswrapper[4854]: I1125 11:23:07.912334 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.014532 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.014767 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x42tb\" (UniqueName: \"kubernetes.io/projected/cba11762-4763-49ac-9d65-ff8b2d2d665a-kube-api-access-x42tb\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.014857 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-utilities\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.015068 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.015618 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-utilities\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.048168 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-x42tb\" (UniqueName: \"kubernetes.io/projected/cba11762-4763-49ac-9d65-ff8b2d2d665a-kube-api-access-x42tb\") pod \"redhat-operators-t4wc2\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.168038 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:08 crc kubenswrapper[4854]: I1125 11:23:08.807218 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t4wc2"] Nov 25 11:23:09 crc kubenswrapper[4854]: I1125 11:23:09.623567 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerStarted","Data":"55d8b45209c7c386ad63c4b9b8755d934a6b865c711ad9d6926a0f8421eea524"} Nov 25 11:23:09 crc kubenswrapper[4854]: I1125 11:23:09.624977 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerStarted","Data":"ea2205c5474e14d2deecec772fa037f7302f3b9b35743e059267da87979200d5"} Nov 25 11:23:10 crc kubenswrapper[4854]: I1125 11:23:10.636480 4854 generic.go:334] "Generic (PLEG): container finished" podID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerID="55d8b45209c7c386ad63c4b9b8755d934a6b865c711ad9d6926a0f8421eea524" exitCode=0 Nov 25 11:23:10 crc kubenswrapper[4854]: I1125 11:23:10.636523 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerDied","Data":"55d8b45209c7c386ad63c4b9b8755d934a6b865c711ad9d6926a0f8421eea524"} Nov 25 11:23:12 crc kubenswrapper[4854]: I1125 11:23:12.698382 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerStarted","Data":"3b8de68a5b2bec321f50434dc0e669b2b39363f669c2b0d4132a44f8b91c75cb"} Nov 25 11:23:18 crc kubenswrapper[4854]: I1125 11:23:18.769635 4854 generic.go:334] "Generic (PLEG): container finished" podID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerID="3b8de68a5b2bec321f50434dc0e669b2b39363f669c2b0d4132a44f8b91c75cb" exitCode=0 Nov 25 11:23:18 crc kubenswrapper[4854]: I1125 11:23:18.769706 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerDied","Data":"3b8de68a5b2bec321f50434dc0e669b2b39363f669c2b0d4132a44f8b91c75cb"} Nov 25 11:23:19 crc kubenswrapper[4854]: I1125 11:23:19.783442 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerStarted","Data":"1b3ab315649f088330cbbe1424612fbac65558d65417a37712b1442741dd3dc6"} Nov 25 11:23:19 crc kubenswrapper[4854]: I1125 11:23:19.802041 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t4wc2" podStartSLOduration=4.118983078 podStartE2EDuration="12.802020366s" podCreationTimestamp="2025-11-25 11:23:07 +0000 UTC" firstStartedPulling="2025-11-25 11:23:10.640408421 +0000 UTC m=+6396.493401797" lastFinishedPulling="2025-11-25 11:23:19.323445709 +0000 UTC m=+6405.176439085" observedRunningTime="2025-11-25 
11:23:19.79891294 +0000 UTC m=+6405.651906316" watchObservedRunningTime="2025-11-25 11:23:19.802020366 +0000 UTC m=+6405.655013742" Nov 25 11:23:27 crc kubenswrapper[4854]: I1125 11:23:27.808018 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_93de10f0-290f-47f5-bb09-214e32227b84/aodh-api/0.log" Nov 25 11:23:27 crc kubenswrapper[4854]: I1125 11:23:27.868836 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_93de10f0-290f-47f5-bb09-214e32227b84/aodh-listener/0.log" Nov 25 11:23:27 crc kubenswrapper[4854]: I1125 11:23:27.934973 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_93de10f0-290f-47f5-bb09-214e32227b84/aodh-evaluator/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.132536 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_93de10f0-290f-47f5-bb09-214e32227b84/aodh-notifier/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.169319 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.169370 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.193263 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-dd6ff4df6-gfkgf_941bda47-168e-496a-b60f-c4edb4560bcc/barbican-api-log/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.199969 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-dd6ff4df6-gfkgf_941bda47-168e-496a-b60f-c4edb4560bcc/barbican-api/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.470375 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-84d58997f8-bpzfk_3d226a10-004d-4b8c-8282-4f7955c8d41f/barbican-keystone-listener/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.603821 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-84d58997f8-bpzfk_3d226a10-004d-4b8c-8282-4f7955c8d41f/barbican-keystone-listener-log/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.725663 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-586fb5bfc9-zcsm7_fd27c53c-e9d6-40de-9d4c-fea018061c07/barbican-worker/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.824853 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-586fb5bfc9-zcsm7_fd27c53c-e9d6-40de-9d4c-fea018061c07/barbican-worker-log/0.log" Nov 25 11:23:28 crc kubenswrapper[4854]: I1125 11:23:28.899423 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-bdk7f_82274506-4003-44e9-86ac-996dfe014de0/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.191982 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_e73606dc-c7c0-4d1e-9f87-5effe3a03611/ceilometer-central-agent/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.204446 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_e73606dc-c7c0-4d1e-9f87-5effe3a03611/proxy-httpd/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.231577 4854 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-operators-t4wc2" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:23:29 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:23:29 crc kubenswrapper[4854]: > Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.242570 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_e73606dc-c7c0-4d1e-9f87-5effe3a03611/ceilometer-notification-agent/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.338629 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_e73606dc-c7c0-4d1e-9f87-5effe3a03611/sg-core/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.510105 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_cd9acce9-fd86-43e8-8359-0a31686cdc7a/cinder-api-log/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.593430 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_cd9acce9-fd86-43e8-8359-0a31686cdc7a/cinder-api/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.759203 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_bfc85811-fd01-48df-99bf-1220134a32b2/cinder-scheduler/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.896278 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-b6lkz_73c864f0-2662-48ed-9d9c-f04c714af11f/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:29 crc kubenswrapper[4854]: I1125 11:23:29.926158 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_bfc85811-fd01-48df-99bf-1220134a32b2/probe/0.log" Nov 25 11:23:30 crc kubenswrapper[4854]: I1125 11:23:30.215254 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-22qxt_751a11f6-66a6-4336-81ff-8138ffe4f076/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:30 crc kubenswrapper[4854]: I1125 11:23:30.238808 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-mjfvn_51592fc8-630b-49d8-979a-d1ad4c3962f6/init/0.log" Nov 25 11:23:30 crc kubenswrapper[4854]: I1125 11:23:30.472367 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-mjfvn_51592fc8-630b-49d8-979a-d1ad4c3962f6/init/0.log" Nov 25 11:23:30 crc kubenswrapper[4854]: I1125 11:23:30.524099 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-mjfvn_51592fc8-630b-49d8-979a-d1ad4c3962f6/dnsmasq-dns/0.log" Nov 25 11:23:30 crc kubenswrapper[4854]: I1125 11:23:30.598711 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-9bxcq_51c1f6ef-dc9b-4a3a-be06-21f8c34594b1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:30 crc kubenswrapper[4854]: I1125 11:23:30.785888 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_953a22a5-3c0c-402b-a4e1-35dfcea8f92f/glance-log/0.log" Nov 25 11:23:30 crc kubenswrapper[4854]: I1125 11:23:30.825487 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_953a22a5-3c0c-402b-a4e1-35dfcea8f92f/glance-httpd/0.log" Nov 25 11:23:31 crc 
kubenswrapper[4854]: I1125 11:23:31.024258 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_02ae5459-cfb7-4cce-a81b-7c0f28eca1aa/glance-log/0.log" Nov 25 11:23:31 crc kubenswrapper[4854]: I1125 11:23:31.110372 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_02ae5459-cfb7-4cce-a81b-7c0f28eca1aa/glance-httpd/0.log" Nov 25 11:23:31 crc kubenswrapper[4854]: I1125 11:23:31.713974 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-5688985587-fs97x_935ce673-59b2-4651-9193-12afdb60ed71/heat-engine/0.log" Nov 25 11:23:32 crc kubenswrapper[4854]: I1125 11:23:32.007450 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-9tltd_7e3d0041-519a-4744-9c38-ae10ebbd0812/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:32 crc kubenswrapper[4854]: I1125 11:23:32.227757 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-5f59c54579-mqcrl_0bdea020-6056-4c2f-afb7-c715fb1116c0/heat-api/0.log" Nov 25 11:23:32 crc kubenswrapper[4854]: I1125 11:23:32.311582 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-77db595d79-zsh9l_5d90bbbc-ccfe-4462-a8b5-3d3c4cbbcf3d/heat-cfnapi/0.log" Nov 25 11:23:32 crc kubenswrapper[4854]: I1125 11:23:32.336904 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-499vn_9e84a5bb-da4d-40aa-a555-a63bd8bd3f10/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:32 crc kubenswrapper[4854]: I1125 11:23:32.608182 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401141-2fwff_915cfbe7-9150-4dc9-8a66-8195d7d3c4c8/keystone-cron/0.log" Nov 25 11:23:32 crc kubenswrapper[4854]: I1125 11:23:32.626537 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401081-jn8kd_ee7b8559-e810-42ef-99b7-e206a817fd29/keystone-cron/0.log" Nov 25 11:23:32 crc kubenswrapper[4854]: I1125 11:23:32.852139 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_cc627576-3561-41b8-9d76-f69680c1012a/kube-state-metrics/0.log" Nov 25 11:23:33 crc kubenswrapper[4854]: I1125 11:23:33.005278 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-qfxw9_897c33c5-2a69-4163-a965-7ebe1881ce1e/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:33 crc kubenswrapper[4854]: I1125 11:23:33.062857 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6775cd4556-vz69t_c6297e85-6f7a-479d-9109-1e0f5c5a8cb8/keystone-api/0.log" Nov 25 11:23:33 crc kubenswrapper[4854]: I1125 11:23:33.190196 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-5hbrg_24119236-3cfd-4959-b117-36042fab9fb3/logging-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:33 crc kubenswrapper[4854]: I1125 11:23:33.415850 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_35ef28aa-b004-4616-8de8-0a88444ab5f2/mysqld-exporter/0.log" Nov 25 11:23:33 crc kubenswrapper[4854]: I1125 11:23:33.871473 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6f86b9df97-x4wkv_d0ba70f0-4fdd-461d-a0be-4af340f425a0/neutron-httpd/0.log" Nov 25 11:23:33 crc 
kubenswrapper[4854]: I1125 11:23:33.883456 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-7nwm7_40369967-68d4-4fd5-aa53-432f0c6d4e0f/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:33 crc kubenswrapper[4854]: I1125 11:23:33.955865 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6f86b9df97-x4wkv_d0ba70f0-4fdd-461d-a0be-4af340f425a0/neutron-api/0.log" Nov 25 11:23:34 crc kubenswrapper[4854]: I1125 11:23:34.930625 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_d267893d-1cd2-420b-a2ad-b95f2e2729e1/nova-cell0-conductor-conductor/0.log" Nov 25 11:23:35 crc kubenswrapper[4854]: I1125 11:23:35.061301 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_4367eb79-33da-49b0-8471-f07db0d493b4/nova-cell1-conductor-conductor/0.log" Nov 25 11:23:35 crc kubenswrapper[4854]: I1125 11:23:35.102901 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d276cec0-76c5-44c3-91a5-669c20aaba25/nova-api-log/0.log" Nov 25 11:23:35 crc kubenswrapper[4854]: I1125 11:23:35.471905 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_fe4a1f25-2ac0-4fa9-8abd-934878c41b2a/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 11:23:35 crc kubenswrapper[4854]: I1125 11:23:35.492261 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d276cec0-76c5-44c3-91a5-669c20aaba25/nova-api-api/0.log" Nov 25 11:23:35 crc kubenswrapper[4854]: I1125 11:23:35.539586 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-dlf9r_a73a64c8-e9bd-44ce-a844-a7ab9b1c3047/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:35 crc kubenswrapper[4854]: I1125 11:23:35.878331 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_abaf3b4c-e24e-4387-950e-b0b50ed7d0e5/nova-metadata-log/0.log" Nov 25 11:23:36 crc kubenswrapper[4854]: I1125 11:23:36.156939 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_3c760277-34b8-4444-9c0f-c5a7b572f4ed/nova-scheduler-scheduler/0.log" Nov 25 11:23:36 crc kubenswrapper[4854]: I1125 11:23:36.161179 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_827c4948-d1bc-4c63-838b-57f267bdcf93/mysql-bootstrap/0.log" Nov 25 11:23:36 crc kubenswrapper[4854]: I1125 11:23:36.754330 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_827c4948-d1bc-4c63-838b-57f267bdcf93/galera/0.log" Nov 25 11:23:36 crc kubenswrapper[4854]: I1125 11:23:36.858481 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_827c4948-d1bc-4c63-838b-57f267bdcf93/mysql-bootstrap/0.log" Nov 25 11:23:36 crc kubenswrapper[4854]: I1125 11:23:36.967758 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_0984ac84-1833-4ddb-b21b-d526b64e9991/mysql-bootstrap/0.log" Nov 25 11:23:37 crc kubenswrapper[4854]: I1125 11:23:37.259222 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_0984ac84-1833-4ddb-b21b-d526b64e9991/mysql-bootstrap/0.log" Nov 25 11:23:37 crc kubenswrapper[4854]: I1125 11:23:37.321159 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-galera-0_0984ac84-1833-4ddb-b21b-d526b64e9991/galera/0.log" Nov 25 11:23:37 crc kubenswrapper[4854]: I1125 11:23:37.471838 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_b91ef9a0-60a5-4dd9-9239-a784c885f332/openstackclient/0.log" Nov 25 11:23:37 crc kubenswrapper[4854]: I1125 11:23:37.542904 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-7dl26_04573f28-a6e2-46ca-8a02-a2265c5d68e9/ovn-controller/0.log" Nov 25 11:23:37 crc kubenswrapper[4854]: I1125 11:23:37.781630 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-ff4zf_39cb343e-3bd0-4a63-b3dc-6814e8e8bca1/openstack-network-exporter/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.039985 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2t2j4_f1553410-0e8c-4a68-89ed-67f3eeef891d/ovsdb-server-init/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.263846 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2t2j4_f1553410-0e8c-4a68-89ed-67f3eeef891d/ovs-vswitchd/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.279297 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2t2j4_f1553410-0e8c-4a68-89ed-67f3eeef891d/ovsdb-server/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.290563 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2t2j4_f1553410-0e8c-4a68-89ed-67f3eeef891d/ovsdb-server-init/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.581711 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-hrlfd_6df14f5b-e51a-4692-8452-03f1cfa9eabb/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.582740 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_abaf3b4c-e24e-4387-950e-b0b50ed7d0e5/nova-metadata-metadata/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.805472 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_6c660812-03bd-4475-895a-d896c14ef125/ovn-northd/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.852963 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_6c660812-03bd-4475-895a-d896c14ef125/openstack-network-exporter/0.log" Nov 25 11:23:38 crc kubenswrapper[4854]: I1125 11:23:38.914065 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_e868f46b-cfbf-4642-ad85-c32884fca542/openstack-network-exporter/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.053583 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_e868f46b-cfbf-4642-ad85-c32884fca542/ovsdbserver-nb/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.165723 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1ce61789-25f3-44d3-813d-d51d10d068f1/openstack-network-exporter/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.172945 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1ce61789-25f3-44d3-813d-d51d10d068f1/ovsdbserver-sb/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.221716 4854 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/redhat-operators-t4wc2" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:23:39 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:23:39 crc kubenswrapper[4854]: > Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.612574 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_558820bb-fc66-444a-96d3-107dbc60fb3f/init-config-reloader/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.681590 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-557b4bfdc4-lt7bk_893affd4-934c-4901-933d-5b28b78ca519/placement-api/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.698297 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-557b4bfdc4-lt7bk_893affd4-934c-4901-933d-5b28b78ca519/placement-log/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.825351 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_558820bb-fc66-444a-96d3-107dbc60fb3f/init-config-reloader/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.933195 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_558820bb-fc66-444a-96d3-107dbc60fb3f/thanos-sidecar/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.935403 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_558820bb-fc66-444a-96d3-107dbc60fb3f/config-reloader/0.log" Nov 25 11:23:39 crc kubenswrapper[4854]: I1125 11:23:39.966572 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_558820bb-fc66-444a-96d3-107dbc60fb3f/prometheus/0.log" Nov 25 11:23:40 crc kubenswrapper[4854]: I1125 11:23:40.179341 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bb562532-b4b2-42ad-9d8e-a9b230a3bcf5/setup-container/0.log" Nov 25 11:23:40 crc kubenswrapper[4854]: I1125 11:23:40.437369 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bb562532-b4b2-42ad-9d8e-a9b230a3bcf5/rabbitmq/0.log" Nov 25 11:23:40 crc kubenswrapper[4854]: I1125 11:23:40.478324 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bb562532-b4b2-42ad-9d8e-a9b230a3bcf5/setup-container/0.log" Nov 25 11:23:40 crc kubenswrapper[4854]: I1125 11:23:40.524999 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_ff5d28f8-deea-439b-abb6-7882641c046f/setup-container/0.log" Nov 25 11:23:40 crc kubenswrapper[4854]: I1125 11:23:40.807107 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_ff5d28f8-deea-439b-abb6-7882641c046f/rabbitmq/0.log" Nov 25 11:23:40 crc kubenswrapper[4854]: I1125 11:23:40.844924 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_ff5d28f8-deea-439b-abb6-7882641c046f/setup-container/0.log" Nov 25 11:23:40 crc kubenswrapper[4854]: I1125 11:23:40.874799 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e/setup-container/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.113599 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-1_1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e/setup-container/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.201765 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_1fe6c6c2-abbf-4014-a82d-4103f3fc9e3e/rabbitmq/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.264242 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_fdceb63d-e366-47f2-954d-29730788adbb/setup-container/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.487204 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_fdceb63d-e366-47f2-954d-29730788adbb/setup-container/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.527468 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-87xs7_d9434a1f-53bb-45ac-996f-e2e32c7b447f/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.551065 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_fdceb63d-e366-47f2-954d-29730788adbb/rabbitmq/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.812502 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-54jhk_261f5f8f-12a4-4ef7-83f6-cc6a1f054279/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:41 crc kubenswrapper[4854]: I1125 11:23:41.843966 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-9fmjb_10f4bf87-5b7e-4077-8d81-13f86562549e/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.093123 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-97tkq_0eb416d3-dc11-438b-9f3b-c0660e4c81f8/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.179745 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-8pcrq_ed3268b7-e706-4ec0-9bb9-7e6c86414b2a/ssh-known-hosts-edpm-deployment/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.398140 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5dbbcc5579-hmqdg_755d55c2-0eaa-4186-bd25-00e8c34166be/proxy-server/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.570114 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5dbbcc5579-hmqdg_755d55c2-0eaa-4186-bd25-00e8c34166be/proxy-httpd/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.626392 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-m8t44_5a1d05ce-0c76-4823-a86f-004ea7655be9/swift-ring-rebalance/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.767946 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/account-auditor/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.885439 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/account-reaper/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.940288 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/account-server/0.log" Nov 25 11:23:42 crc kubenswrapper[4854]: I1125 11:23:42.972548 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/account-replicator/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.020476 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/container-auditor/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.179599 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/container-server/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.283111 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/container-updater/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.354170 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/container-replicator/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.418284 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/object-auditor/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.517048 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/object-expirer/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.618581 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/object-replicator/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.679876 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/object-server/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.682909 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/object-updater/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.824947 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/rsync/0.log" Nov 25 11:23:43 crc kubenswrapper[4854]: I1125 11:23:43.970631 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_eb6d8324-0633-4891-9a9c-f782e7cec247/swift-recon-cron/0.log" Nov 25 11:23:44 crc kubenswrapper[4854]: I1125 11:23:44.147846 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-drzgj_7f6c02cf-5044-461e-92d6-107c2e965a7a/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:44 crc kubenswrapper[4854]: I1125 11:23:44.348856 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-zmghc_a09aa3eb-06ca-4f41-9d00-0b13b8bd8044/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:45 crc kubenswrapper[4854]: I1125 11:23:45.042896 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2610fa25-8ff3-403a-aea8-408f28a4fb9a/test-operator-logs-container/0.log" Nov 25 11:23:45 crc kubenswrapper[4854]: 
I1125 11:23:45.374319 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-xwnbk_0ae9a7d0-e882-4647-94e3-3c41a2b18d1b/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:23:45 crc kubenswrapper[4854]: I1125 11:23:45.441941 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_2323d0fa-ad38-4041-b209-029ace425aa7/tempest-tests-tempest-tests-runner/0.log" Nov 25 11:23:48 crc kubenswrapper[4854]: I1125 11:23:48.454175 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_2f639da7-7576-4274-94c5-4304b6af9b4d/memcached/0.log" Nov 25 11:23:49 crc kubenswrapper[4854]: I1125 11:23:49.237691 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t4wc2" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:23:49 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:23:49 crc kubenswrapper[4854]: > Nov 25 11:23:59 crc kubenswrapper[4854]: I1125 11:23:59.248905 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t4wc2" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:23:59 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:23:59 crc kubenswrapper[4854]: > Nov 25 11:24:08 crc kubenswrapper[4854]: I1125 11:24:08.241115 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:24:08 crc kubenswrapper[4854]: I1125 11:24:08.312955 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:24:08 crc kubenswrapper[4854]: I1125 11:24:08.487659 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t4wc2"] Nov 25 11:24:09 crc kubenswrapper[4854]: I1125 11:24:09.398052 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t4wc2" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" containerID="cri-o://1b3ab315649f088330cbbe1424612fbac65558d65417a37712b1442741dd3dc6" gracePeriod=2 Nov 25 11:24:10 crc kubenswrapper[4854]: I1125 11:24:10.411965 4854 generic.go:334] "Generic (PLEG): container finished" podID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerID="1b3ab315649f088330cbbe1424612fbac65558d65417a37712b1442741dd3dc6" exitCode=0 Nov 25 11:24:10 crc kubenswrapper[4854]: I1125 11:24:10.412045 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerDied","Data":"1b3ab315649f088330cbbe1424612fbac65558d65417a37712b1442741dd3dc6"} Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.060951 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.163987 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x42tb\" (UniqueName: \"kubernetes.io/projected/cba11762-4763-49ac-9d65-ff8b2d2d665a-kube-api-access-x42tb\") pod \"cba11762-4763-49ac-9d65-ff8b2d2d665a\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.164130 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-utilities\") pod \"cba11762-4763-49ac-9d65-ff8b2d2d665a\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.164205 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content\") pod \"cba11762-4763-49ac-9d65-ff8b2d2d665a\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.197454 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-utilities" (OuterVolumeSpecName: "utilities") pod "cba11762-4763-49ac-9d65-ff8b2d2d665a" (UID: "cba11762-4763-49ac-9d65-ff8b2d2d665a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.207366 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cba11762-4763-49ac-9d65-ff8b2d2d665a-kube-api-access-x42tb" (OuterVolumeSpecName: "kube-api-access-x42tb") pod "cba11762-4763-49ac-9d65-ff8b2d2d665a" (UID: "cba11762-4763-49ac-9d65-ff8b2d2d665a"). InnerVolumeSpecName "kube-api-access-x42tb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.271164 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x42tb\" (UniqueName: \"kubernetes.io/projected/cba11762-4763-49ac-9d65-ff8b2d2d665a-kube-api-access-x42tb\") on node \"crc\" DevicePath \"\"" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.271199 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.372020 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cba11762-4763-49ac-9d65-ff8b2d2d665a" (UID: "cba11762-4763-49ac-9d65-ff8b2d2d665a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.372648 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content\") pod \"cba11762-4763-49ac-9d65-ff8b2d2d665a\" (UID: \"cba11762-4763-49ac-9d65-ff8b2d2d665a\") " Nov 25 11:24:11 crc kubenswrapper[4854]: W1125 11:24:11.391487 4854 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/cba11762-4763-49ac-9d65-ff8b2d2d665a/volumes/kubernetes.io~empty-dir/catalog-content Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.391767 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cba11762-4763-49ac-9d65-ff8b2d2d665a" (UID: "cba11762-4763-49ac-9d65-ff8b2d2d665a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.426595 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4wc2" event={"ID":"cba11762-4763-49ac-9d65-ff8b2d2d665a","Type":"ContainerDied","Data":"ea2205c5474e14d2deecec772fa037f7302f3b9b35743e059267da87979200d5"} Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.427765 4854 scope.go:117] "RemoveContainer" containerID="1b3ab315649f088330cbbe1424612fbac65558d65417a37712b1442741dd3dc6" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.426862 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t4wc2" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.468873 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t4wc2"] Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.475581 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cba11762-4763-49ac-9d65-ff8b2d2d665a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.484106 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t4wc2"] Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.515409 4854 scope.go:117] "RemoveContainer" containerID="3b8de68a5b2bec321f50434dc0e669b2b39363f669c2b0d4132a44f8b91c75cb" Nov 25 11:24:11 crc kubenswrapper[4854]: I1125 11:24:11.543496 4854 scope.go:117] "RemoveContainer" containerID="55d8b45209c7c386ad63c4b9b8755d934a6b865c711ad9d6926a0f8421eea524" Nov 25 11:24:13 crc kubenswrapper[4854]: I1125 11:24:13.027497 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" path="/var/lib/kubelet/pods/cba11762-4763-49ac-9d65-ff8b2d2d665a/volumes" Nov 25 11:24:16 crc kubenswrapper[4854]: I1125 11:24:16.262258 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk_d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7/util/0.log" Nov 25 11:24:16 crc kubenswrapper[4854]: I1125 11:24:16.512319 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk_d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7/pull/0.log" Nov 25 11:24:16 crc 
kubenswrapper[4854]: I1125 11:24:16.516849 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk_d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7/pull/0.log" Nov 25 11:24:16 crc kubenswrapper[4854]: I1125 11:24:16.533811 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk_d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7/util/0.log" Nov 25 11:24:16 crc kubenswrapper[4854]: I1125 11:24:16.803503 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk_d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7/util/0.log" Nov 25 11:24:16 crc kubenswrapper[4854]: I1125 11:24:16.854127 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk_d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7/pull/0.log" Nov 25 11:24:16 crc kubenswrapper[4854]: I1125 11:24:16.894665 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09934228e4d1df511f36c8a9f9f2f97d90660d042fb18f75ceeb82c7f9v24mk_d0a2fc71-8cc3-46c4-b9a5-2d796c5916d7/extract/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.024521 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-pvjbb_df2830c6-0e67-4aea-b14d-101a5323617b/kube-rbac-proxy/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.170845 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-vfv6r_e0afee56-5768-44f9-af4d-da496a95ae39/kube-rbac-proxy/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.185026 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-pvjbb_df2830c6-0e67-4aea-b14d-101a5323617b/manager/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.287694 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-vfv6r_e0afee56-5768-44f9-af4d-da496a95ae39/manager/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.356535 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-bjjdt_5a9dc90e-3728-42c9-897c-7b28035196bd/kube-rbac-proxy/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.375495 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-bjjdt_5a9dc90e-3728-42c9-897c-7b28035196bd/manager/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.554192 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-n99lm_5ed548fa-16b5-4733-b798-a0819bc8e77d/kube-rbac-proxy/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.653181 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-n99lm_5ed548fa-16b5-4733-b798-a0819bc8e77d/manager/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.784526 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-zp5bz_1da6b327-2319-4a92-9555-736d992a3348/kube-rbac-proxy/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.917186 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-jx54m_13c658f9-13c2-43d5-9b8a-d30484e5943f/kube-rbac-proxy/0.log" Nov 25 11:24:17 crc kubenswrapper[4854]: I1125 11:24:17.920289 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-zp5bz_1da6b327-2319-4a92-9555-736d992a3348/manager/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.055058 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-jx54m_13c658f9-13c2-43d5-9b8a-d30484e5943f/manager/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.180562 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-n9bk6_77de3154-620e-407a-97e9-94d3dd90ced7/kube-rbac-proxy/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.284271 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-7l4fm_5c0186e4-d72a-4281-ab41-d012f2d4d775/kube-rbac-proxy/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.415576 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-n9bk6_77de3154-620e-407a-97e9-94d3dd90ced7/manager/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.435637 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-7l4fm_5c0186e4-d72a-4281-ab41-d012f2d4d775/manager/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.592811 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-sks98_d51e017c-9c85-443a-a5b9-b8b8969bb019/kube-rbac-proxy/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.683183 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-sks98_d51e017c-9c85-443a-a5b9-b8b8969bb019/manager/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.765116 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-4tvd5_7cff163e-525b-4280-ab48-35eb6d6dd242/kube-rbac-proxy/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.799051 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-4tvd5_7cff163e-525b-4280-ab48-35eb6d6dd242/manager/0.log" Nov 25 11:24:18 crc kubenswrapper[4854]: I1125 11:24:18.889610 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-dlq5q_10550c56-b051-4d65-a13c-3854e40d2869/kube-rbac-proxy/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.017403 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-dlq5q_10550c56-b051-4d65-a13c-3854e40d2869/manager/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.210401 4854 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-9nqtc_2b0d30d6-6825-4d29-b5bb-9ea86f790b6f/manager/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.215839 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-9nqtc_2b0d30d6-6825-4d29-b5bb-9ea86f790b6f/kube-rbac-proxy/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.290195 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-7bgzh_e3e51685-6e25-4bcd-8194-4faf0947962a/kube-rbac-proxy/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.505322 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-wcjpf_bcafa8f9-8abb-4015-8afc-6767b0485ad8/kube-rbac-proxy/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.514700 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-7bgzh_e3e51685-6e25-4bcd-8194-4faf0947962a/manager/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.569921 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-wcjpf_bcafa8f9-8abb-4015-8afc-6767b0485ad8/manager/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.729879 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz_0ef84c87-7162-45bb-9622-48b2b37e50bd/kube-rbac-proxy/0.log" Nov 25 11:24:19 crc kubenswrapper[4854]: I1125 11:24:19.771526 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-lcbwz_0ef84c87-7162-45bb-9622-48b2b37e50bd/manager/0.log" Nov 25 11:24:20 crc kubenswrapper[4854]: I1125 11:24:20.195525 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-68sqj_0aca6f91-76ff-47fe-957a-d03494fa8d99/registry-server/0.log" Nov 25 11:24:20 crc kubenswrapper[4854]: I1125 11:24:20.205130 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-674d4d8cb8-wcrmg_61c604f4-d038-4563-b7a9-d14831788526/operator/0.log" Nov 25 11:24:20 crc kubenswrapper[4854]: I1125 11:24:20.479328 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-tcblk_62c72ef0-d6f3-4b75-aa57-43d934de39e9/kube-rbac-proxy/0.log" Nov 25 11:24:20 crc kubenswrapper[4854]: I1125 11:24:20.587346 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-tcblk_62c72ef0-d6f3-4b75-aa57-43d934de39e9/manager/0.log" Nov 25 11:24:20 crc kubenswrapper[4854]: I1125 11:24:20.642304 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-v88zs_c0ebacaa-bc16-4876-87cb-011f523a59a4/kube-rbac-proxy/0.log" Nov 25 11:24:20 crc kubenswrapper[4854]: I1125 11:24:20.765937 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-v88zs_c0ebacaa-bc16-4876-87cb-011f523a59a4/manager/0.log" Nov 25 11:24:20 crc kubenswrapper[4854]: I1125 11:24:20.969257 4854 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-gwfmz_3879e5ff-7566-4cd8-bcac-e8c07a79f965/operator/0.log" Nov 25 11:24:21 crc kubenswrapper[4854]: I1125 11:24:21.043682 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-rwp8j_17936560-9d8d-4c29-b12a-451abdc2787a/kube-rbac-proxy/0.log" Nov 25 11:24:21 crc kubenswrapper[4854]: I1125 11:24:21.161297 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-rwp8j_17936560-9d8d-4c29-b12a-451abdc2787a/manager/0.log" Nov 25 11:24:21 crc kubenswrapper[4854]: I1125 11:24:21.293945 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-67b89c8998-d98c4_91d4e118-8133-4531-8e70-afb240453f11/kube-rbac-proxy/0.log" Nov 25 11:24:23 crc kubenswrapper[4854]: I1125 11:24:23.079227 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-9pstc_0e7943a0-c710-4440-a8be-932c12cfd4de/kube-rbac-proxy/0.log" Nov 25 11:24:23 crc kubenswrapper[4854]: I1125 11:24:23.084765 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-vddrp_0e528244-b216-46fa-95a8-8b0faf6a50df/kube-rbac-proxy/0.log" Nov 25 11:24:23 crc kubenswrapper[4854]: I1125 11:24:23.122444 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-vddrp_0e528244-b216-46fa-95a8-8b0faf6a50df/manager/0.log" Nov 25 11:24:23 crc kubenswrapper[4854]: I1125 11:24:23.130218 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-h7fm2" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="registry-server" probeResult="failure" output=< Nov 25 11:24:23 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:24:23 crc kubenswrapper[4854]: > Nov 25 11:24:23 crc kubenswrapper[4854]: I1125 11:24:23.149233 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-848ff5c487-2kbtl_21f043af-1d20-48fd-a8eb-40cdbee6ab8c/manager/0.log" Nov 25 11:24:23 crc kubenswrapper[4854]: I1125 11:24:23.362803 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-9pstc_0e7943a0-c710-4440-a8be-932c12cfd4de/manager/0.log" Nov 25 11:24:23 crc kubenswrapper[4854]: I1125 11:24:23.381163 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-67b89c8998-d98c4_91d4e118-8133-4531-8e70-afb240453f11/manager/0.log" Nov 25 11:24:40 crc kubenswrapper[4854]: I1125 11:24:40.906030 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-chcz8_b236ed1d-4b1e-4910-9df1-0db7353a28c5/control-plane-machine-set-operator/0.log" Nov 25 11:24:41 crc kubenswrapper[4854]: I1125 11:24:41.075840 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vhjzg_c464b1b6-988b-430b-b6ac-6b5110888de8/kube-rbac-proxy/0.log" Nov 25 11:24:41 crc kubenswrapper[4854]: I1125 11:24:41.096378 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vhjzg_c464b1b6-988b-430b-b6ac-6b5110888de8/machine-api-operator/0.log" Nov 25 11:24:53 crc kubenswrapper[4854]: I1125 11:24:53.638284 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-g64kp_c1e17925-72e5-4e01-b5dd-b12de1f249eb/cert-manager-controller/0.log" Nov 25 11:24:53 crc kubenswrapper[4854]: I1125 11:24:53.823692 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-tfnst_2666cb9d-61b4-435d-b44f-debac56efa9f/cert-manager-cainjector/0.log" Nov 25 11:24:53 crc kubenswrapper[4854]: I1125 11:24:53.883869 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-h46p2_44c83449-45d4-4c76-a21b-e91f947cf265/cert-manager-webhook/0.log" Nov 25 11:24:55 crc kubenswrapper[4854]: I1125 11:24:55.039418 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:24:55 crc kubenswrapper[4854]: I1125 11:24:55.039741 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:25:07 crc kubenswrapper[4854]: I1125 11:25:07.078379 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-2tmw9_772f9cbd-1f86-4502-b501-bb781c0f11fe/nmstate-console-plugin/0.log" Nov 25 11:25:07 crc kubenswrapper[4854]: I1125 11:25:07.331922 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-xq8dt_22559aba-4245-4836-9ea5-9edad39725c5/nmstate-handler/0.log" Nov 25 11:25:07 crc kubenswrapper[4854]: I1125 11:25:07.357722 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-8xhgf_459bf834-bbae-4e88-812d-0f7f4f2560f5/nmstate-metrics/0.log" Nov 25 11:25:07 crc kubenswrapper[4854]: I1125 11:25:07.358171 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-8xhgf_459bf834-bbae-4e88-812d-0f7f4f2560f5/kube-rbac-proxy/0.log" Nov 25 11:25:07 crc kubenswrapper[4854]: I1125 11:25:07.522562 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-f8dcl_c54d3a2c-5361-4407-bd50-70678088146a/nmstate-operator/0.log" Nov 25 11:25:07 crc kubenswrapper[4854]: I1125 11:25:07.567561 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-8hkpp_f1e621b9-4a85-435a-af8b-2e17f4e74bef/nmstate-webhook/0.log" Nov 25 11:25:20 crc kubenswrapper[4854]: I1125 11:25:20.391286 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-858c6c7dc8-bdkz2_f41b420b-a51d-40b7-9a74-5930db508da8/kube-rbac-proxy/0.log" Nov 25 11:25:20 crc kubenswrapper[4854]: I1125 11:25:20.412080 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-858c6c7dc8-bdkz2_f41b420b-a51d-40b7-9a74-5930db508da8/manager/0.log" Nov 25 11:25:25 crc kubenswrapper[4854]: I1125 11:25:25.029865 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:25:25 crc kubenswrapper[4854]: I1125 11:25:25.030373 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.080210 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-p8rwt_729e9fb0-9e7d-468b-8a41-df4c73b51607/cluster-logging-operator/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.217715 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-wg25b_00b58efa-c7d1-4ccb-93e1-15e8813f9074/collector/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.330705 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_9578161a-d74b-48f8-9e3a-4ed85b4bb673/loki-compactor/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.436915 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-9jxqv_74f7ae80-fd76-4c73-85e8-a886a203733d/loki-distributor/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.519321 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7bb4c68556-4mv8n_3aad27bd-694f-4ee7-a0a3-daf16b8d42e5/gateway/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.546184 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7bb4c68556-4mv8n_3aad27bd-694f-4ee7-a0a3-daf16b8d42e5/opa/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.781034 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7bb4c68556-bj5ps_44898682-d509-4f9b-b054-cd1df11c603d/gateway/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.833661 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7bb4c68556-bj5ps_44898682-d509-4f9b-b054-cd1df11c603d/opa/0.log" Nov 25 11:25:35 crc kubenswrapper[4854]: I1125 11:25:35.858263 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_a72bbae8-a7e3-4605-b2b7-f9254913f1e5/loki-index-gateway/0.log" Nov 25 11:25:36 crc kubenswrapper[4854]: I1125 11:25:36.458727 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_3ac43dd4-dc0c-4974-8521-11073254c3cd/loki-ingester/0.log" Nov 25 11:25:36 crc kubenswrapper[4854]: I1125 11:25:36.510888 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-dzsk5_d6c5fbf6-e379-49e2-a402-e36d171150dd/loki-querier/0.log" Nov 25 11:25:36 crc kubenswrapper[4854]: I1125 11:25:36.805613 4854 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-9xgzz_111ecac4-100d-47df-97d7-23f5c048c7d5/loki-query-frontend/0.log" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.827351 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rq74j"] Nov 25 11:25:38 crc kubenswrapper[4854]: E1125 11:25:38.828583 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="extract-utilities" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.828603 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="extract-utilities" Nov 25 11:25:38 crc kubenswrapper[4854]: E1125 11:25:38.828702 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.828715 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" Nov 25 11:25:38 crc kubenswrapper[4854]: E1125 11:25:38.828748 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="extract-content" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.828755 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="extract-content" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.829080 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="cba11762-4763-49ac-9d65-ff8b2d2d665a" containerName="registry-server" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.831279 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.845645 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rq74j"] Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.861407 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpq56\" (UniqueName: \"kubernetes.io/projected/51c8c848-1585-4632-9e36-f39a0f5a7074-kube-api-access-zpq56\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.861486 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51c8c848-1585-4632-9e36-f39a0f5a7074-catalog-content\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.861557 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51c8c848-1585-4632-9e36-f39a0f5a7074-utilities\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.963997 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpq56\" (UniqueName: \"kubernetes.io/projected/51c8c848-1585-4632-9e36-f39a0f5a7074-kube-api-access-zpq56\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.964085 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51c8c848-1585-4632-9e36-f39a0f5a7074-catalog-content\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.964182 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51c8c848-1585-4632-9e36-f39a0f5a7074-utilities\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.964884 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51c8c848-1585-4632-9e36-f39a0f5a7074-catalog-content\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:38 crc kubenswrapper[4854]: I1125 11:25:38.964984 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51c8c848-1585-4632-9e36-f39a0f5a7074-utilities\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:39 crc kubenswrapper[4854]: I1125 11:25:38.990656 4854 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zpq56\" (UniqueName: \"kubernetes.io/projected/51c8c848-1585-4632-9e36-f39a0f5a7074-kube-api-access-zpq56\") pod \"certified-operators-rq74j\" (UID: \"51c8c848-1585-4632-9e36-f39a0f5a7074\") " pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:39 crc kubenswrapper[4854]: I1125 11:25:39.175696 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:42 crc kubenswrapper[4854]: I1125 11:25:42.161205 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rq74j"] Nov 25 11:25:43 crc kubenswrapper[4854]: I1125 11:25:43.050185 4854 generic.go:334] "Generic (PLEG): container finished" podID="51c8c848-1585-4632-9e36-f39a0f5a7074" containerID="41d9ead3f6b899c4ed1fb06c5a0944f2dc43f664b9c507c9d6781560e6ba055d" exitCode=0 Nov 25 11:25:43 crc kubenswrapper[4854]: I1125 11:25:43.050268 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq74j" event={"ID":"51c8c848-1585-4632-9e36-f39a0f5a7074","Type":"ContainerDied","Data":"41d9ead3f6b899c4ed1fb06c5a0944f2dc43f664b9c507c9d6781560e6ba055d"} Nov 25 11:25:43 crc kubenswrapper[4854]: I1125 11:25:43.050941 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq74j" event={"ID":"51c8c848-1585-4632-9e36-f39a0f5a7074","Type":"ContainerStarted","Data":"fcdc5646cde745b671b281a0cc3cfa5e0e2eedc66282cd55073a527f09b1a3be"} Nov 25 11:25:43 crc kubenswrapper[4854]: I1125 11:25:43.055013 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:25:49 crc kubenswrapper[4854]: I1125 11:25:49.156844 4854 generic.go:334] "Generic (PLEG): container finished" podID="51c8c848-1585-4632-9e36-f39a0f5a7074" containerID="68916c48e23f4995ed2d5e83ca5baf65435f061ed94ce9f2229eb9e99f859c06" exitCode=0 Nov 25 11:25:49 crc kubenswrapper[4854]: I1125 11:25:49.156977 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq74j" event={"ID":"51c8c848-1585-4632-9e36-f39a0f5a7074","Type":"ContainerDied","Data":"68916c48e23f4995ed2d5e83ca5baf65435f061ed94ce9f2229eb9e99f859c06"} Nov 25 11:25:50 crc kubenswrapper[4854]: I1125 11:25:50.170790 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq74j" event={"ID":"51c8c848-1585-4632-9e36-f39a0f5a7074","Type":"ContainerStarted","Data":"f9bb36c7cc21a69966fe816fb512345c3b47fa75c68eae759a68d2e0cc11dbd3"} Nov 25 11:25:50 crc kubenswrapper[4854]: I1125 11:25:50.199009 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rq74j" podStartSLOduration=5.436462492 podStartE2EDuration="12.19898954s" podCreationTimestamp="2025-11-25 11:25:38 +0000 UTC" firstStartedPulling="2025-11-25 11:25:43.052738852 +0000 UTC m=+6548.905732228" lastFinishedPulling="2025-11-25 11:25:49.8152659 +0000 UTC m=+6555.668259276" observedRunningTime="2025-11-25 11:25:50.189325308 +0000 UTC m=+6556.042318684" watchObservedRunningTime="2025-11-25 11:25:50.19898954 +0000 UTC m=+6556.051982916" Nov 25 11:25:53 crc kubenswrapper[4854]: I1125 11:25:53.440822 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-dx66b_52bdf1ca-71ae-4bb7-a4e9-593da3d912cf/kube-rbac-proxy/0.log" Nov 25 11:25:53 crc kubenswrapper[4854]: I1125 
11:25:53.693146 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-frr-files/0.log" Nov 25 11:25:53 crc kubenswrapper[4854]: I1125 11:25:53.906093 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-metrics/0.log" Nov 25 11:25:53 crc kubenswrapper[4854]: I1125 11:25:53.931287 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-frr-files/0.log" Nov 25 11:25:53 crc kubenswrapper[4854]: I1125 11:25:53.938607 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-reloader/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.100247 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-reloader/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.314839 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-reloader/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.357274 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-frr-files/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.398153 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-dx66b_52bdf1ca-71ae-4bb7-a4e9-593da3d912cf/controller/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.432487 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-metrics/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.576240 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-metrics/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.738262 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-frr-files/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.785812 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-metrics/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.812140 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/cp-reloader/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.861473 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/controller/0.log" Nov 25 11:25:54 crc kubenswrapper[4854]: I1125 11:25:54.987893 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/frr-metrics/0.log" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.012803 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/kube-rbac-proxy/0.log" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.028364 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.028542 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.028630 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.029508 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.029621 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" gracePeriod=600 Nov 25 11:25:55 crc kubenswrapper[4854]: E1125 11:25:55.185101 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.226994 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" exitCode=0 Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.227039 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909"} Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.227070 4854 scope.go:117] "RemoveContainer" containerID="2c8514cc6c40a71fc410000bbe6ceecaa6fa777b7603a916034922fed06f04fc" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.231819 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:25:55 crc kubenswrapper[4854]: E1125 11:25:55.234163 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" 
Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.255384 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/kube-rbac-proxy-frr/0.log" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.377787 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/reloader/0.log" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.558030 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-52mph_570de8a9-89ac-41e7-973c-d76485b7f41d/frr-k8s-webhook-server/0.log" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.656767 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-687945d4d9-n2f6z_493a8b3d-2aca-4cc2-a2f1-158ed88ca234/manager/0.log" Nov 25 11:25:55 crc kubenswrapper[4854]: I1125 11:25:55.951769 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-fb9df45c-vx6gh_ed8237ae-873c-431e-9283-3a5a4dc63333/webhook-server/0.log" Nov 25 11:25:56 crc kubenswrapper[4854]: I1125 11:25:56.493615 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zlgw9_886977c7-abf1-4a92-8d3f-3c14fc0ea0f1/kube-rbac-proxy/0.log" Nov 25 11:25:56 crc kubenswrapper[4854]: I1125 11:25:56.846929 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-f9q69_a3c67adc-f296-4e37-a023-1c478d8abcd7/frr/0.log" Nov 25 11:25:57 crc kubenswrapper[4854]: I1125 11:25:57.147365 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zlgw9_886977c7-abf1-4a92-8d3f-3c14fc0ea0f1/speaker/0.log" Nov 25 11:25:59 crc kubenswrapper[4854]: I1125 11:25:59.176291 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:59 crc kubenswrapper[4854]: I1125 11:25:59.176700 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:59 crc kubenswrapper[4854]: I1125 11:25:59.231690 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:59 crc kubenswrapper[4854]: I1125 11:25:59.327920 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rq74j" Nov 25 11:25:59 crc kubenswrapper[4854]: I1125 11:25:59.502252 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rq74j"] Nov 25 11:25:59 crc kubenswrapper[4854]: I1125 11:25:59.566238 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h7fm2"] Nov 25 11:25:59 crc kubenswrapper[4854]: I1125 11:25:59.566743 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-h7fm2" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="registry-server" containerID="cri-o://fc49268dc00cf45aebd2f7dfe2d2495219a60d5a3cfbf130b475527ad352e081" gracePeriod=2 Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.288040 4854 generic.go:334] "Generic (PLEG): container finished" podID="30234f04-abcc-479e-96fc-40bfbce02d59" containerID="fc49268dc00cf45aebd2f7dfe2d2495219a60d5a3cfbf130b475527ad352e081" exitCode=0 Nov 25 11:26:00 crc 
kubenswrapper[4854]: I1125 11:26:00.288129 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fm2" event={"ID":"30234f04-abcc-479e-96fc-40bfbce02d59","Type":"ContainerDied","Data":"fc49268dc00cf45aebd2f7dfe2d2495219a60d5a3cfbf130b475527ad352e081"} Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.633937 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.814062 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-utilities\") pod \"30234f04-abcc-479e-96fc-40bfbce02d59\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.814321 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-catalog-content\") pod \"30234f04-abcc-479e-96fc-40bfbce02d59\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.814472 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-utilities" (OuterVolumeSpecName: "utilities") pod "30234f04-abcc-479e-96fc-40bfbce02d59" (UID: "30234f04-abcc-479e-96fc-40bfbce02d59"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.814492 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpjtf\" (UniqueName: \"kubernetes.io/projected/30234f04-abcc-479e-96fc-40bfbce02d59-kube-api-access-tpjtf\") pod \"30234f04-abcc-479e-96fc-40bfbce02d59\" (UID: \"30234f04-abcc-479e-96fc-40bfbce02d59\") " Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.815298 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.823053 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30234f04-abcc-479e-96fc-40bfbce02d59-kube-api-access-tpjtf" (OuterVolumeSpecName: "kube-api-access-tpjtf") pod "30234f04-abcc-479e-96fc-40bfbce02d59" (UID: "30234f04-abcc-479e-96fc-40bfbce02d59"). InnerVolumeSpecName "kube-api-access-tpjtf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.877369 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "30234f04-abcc-479e-96fc-40bfbce02d59" (UID: "30234f04-abcc-479e-96fc-40bfbce02d59"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.917560 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpjtf\" (UniqueName: \"kubernetes.io/projected/30234f04-abcc-479e-96fc-40bfbce02d59-kube-api-access-tpjtf\") on node \"crc\" DevicePath \"\"" Nov 25 11:26:00 crc kubenswrapper[4854]: I1125 11:26:00.917600 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30234f04-abcc-479e-96fc-40bfbce02d59-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:26:01 crc kubenswrapper[4854]: I1125 11:26:01.131133 4854 scope.go:117] "RemoveContainer" containerID="fc49268dc00cf45aebd2f7dfe2d2495219a60d5a3cfbf130b475527ad352e081" Nov 25 11:26:01 crc kubenswrapper[4854]: I1125 11:26:01.167978 4854 scope.go:117] "RemoveContainer" containerID="b4159bd3660426871f520f3a3b0003b54a5ca9e468df399068ebf9d78a716c32" Nov 25 11:26:01 crc kubenswrapper[4854]: I1125 11:26:01.230914 4854 scope.go:117] "RemoveContainer" containerID="8a06f252c6a2388f35ea353d5b524a277504048db22c21fa364fceafc373b4ac" Nov 25 11:26:01 crc kubenswrapper[4854]: I1125 11:26:01.332208 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h7fm2" Nov 25 11:26:01 crc kubenswrapper[4854]: I1125 11:26:01.332284 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fm2" event={"ID":"30234f04-abcc-479e-96fc-40bfbce02d59","Type":"ContainerDied","Data":"aa2297ee2919c39f66f454aa0576fee718bab02804a4038e7590d1016751c0f4"} Nov 25 11:26:01 crc kubenswrapper[4854]: I1125 11:26:01.382125 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h7fm2"] Nov 25 11:26:01 crc kubenswrapper[4854]: I1125 11:26:01.394255 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-h7fm2"] Nov 25 11:26:03 crc kubenswrapper[4854]: I1125 11:26:03.029619 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" path="/var/lib/kubelet/pods/30234f04-abcc-479e-96fc-40bfbce02d59/volumes" Nov 25 11:26:09 crc kubenswrapper[4854]: I1125 11:26:09.013589 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:26:09 crc kubenswrapper[4854]: E1125 11:26:09.014482 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:26:12 crc kubenswrapper[4854]: I1125 11:26:12.406282 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb_92b1ebf7-6ffa-4d23-a719-dbd870f3d184/util/0.log" Nov 25 11:26:12 crc kubenswrapper[4854]: I1125 11:26:12.619315 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb_92b1ebf7-6ffa-4d23-a719-dbd870f3d184/util/0.log" Nov 25 11:26:12 crc kubenswrapper[4854]: I1125 11:26:12.624954 4854 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb_92b1ebf7-6ffa-4d23-a719-dbd870f3d184/pull/0.log" Nov 25 11:26:12 crc kubenswrapper[4854]: I1125 11:26:12.678295 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb_92b1ebf7-6ffa-4d23-a719-dbd870f3d184/pull/0.log" Nov 25 11:26:12 crc kubenswrapper[4854]: I1125 11:26:12.820773 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb_92b1ebf7-6ffa-4d23-a719-dbd870f3d184/extract/0.log" Nov 25 11:26:12 crc kubenswrapper[4854]: I1125 11:26:12.870213 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb_92b1ebf7-6ffa-4d23-a719-dbd870f3d184/pull/0.log" Nov 25 11:26:12 crc kubenswrapper[4854]: I1125 11:26:12.890158 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb844phb_92b1ebf7-6ffa-4d23-a719-dbd870f3d184/util/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.058711 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x_17f88648-1c7f-409d-9f9c-9dad6ddfd03f/util/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.225398 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x_17f88648-1c7f-409d-9f9c-9dad6ddfd03f/pull/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.228415 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x_17f88648-1c7f-409d-9f9c-9dad6ddfd03f/util/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.234687 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x_17f88648-1c7f-409d-9f9c-9dad6ddfd03f/pull/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.467508 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x_17f88648-1c7f-409d-9f9c-9dad6ddfd03f/extract/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.509615 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x_17f88648-1c7f-409d-9f9c-9dad6ddfd03f/pull/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.653504 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ehxm2x_17f88648-1c7f-409d-9f9c-9dad6ddfd03f/util/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.684084 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l_382f3712-78a8-4c24-bebc-530f145931e6/util/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.948861 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l_382f3712-78a8-4c24-bebc-530f145931e6/pull/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.978278 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l_382f3712-78a8-4c24-bebc-530f145931e6/util/0.log" Nov 25 11:26:13 crc kubenswrapper[4854]: I1125 11:26:13.996534 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l_382f3712-78a8-4c24-bebc-530f145931e6/pull/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.174134 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l_382f3712-78a8-4c24-bebc-530f145931e6/util/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.182797 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l_382f3712-78a8-4c24-bebc-530f145931e6/pull/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.217934 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rbs4l_382f3712-78a8-4c24-bebc-530f145931e6/extract/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.386552 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh_61f5661c-89ec-4adb-b6d6-ca38af3f0b5d/util/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.575581 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh_61f5661c-89ec-4adb-b6d6-ca38af3f0b5d/pull/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.580821 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh_61f5661c-89ec-4adb-b6d6-ca38af3f0b5d/pull/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.640141 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh_61f5661c-89ec-4adb-b6d6-ca38af3f0b5d/util/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.827334 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh_61f5661c-89ec-4adb-b6d6-ca38af3f0b5d/pull/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.866248 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh_61f5661c-89ec-4adb-b6d6-ca38af3f0b5d/util/0.log" Nov 25 11:26:14 crc kubenswrapper[4854]: I1125 11:26:14.887165 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463ffrlfh_61f5661c-89ec-4adb-b6d6-ca38af3f0b5d/extract/0.log" Nov 25 11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.012949 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rq74j_51c8c848-1585-4632-9e36-f39a0f5a7074/extract-utilities/0.log" Nov 25 
11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.232712 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rq74j_51c8c848-1585-4632-9e36-f39a0f5a7074/extract-utilities/0.log" Nov 25 11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.255840 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rq74j_51c8c848-1585-4632-9e36-f39a0f5a7074/extract-content/0.log" Nov 25 11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.272633 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rq74j_51c8c848-1585-4632-9e36-f39a0f5a7074/extract-content/0.log" Nov 25 11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.459904 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rq74j_51c8c848-1585-4632-9e36-f39a0f5a7074/extract-utilities/0.log" Nov 25 11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.513501 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rq74j_51c8c848-1585-4632-9e36-f39a0f5a7074/extract-content/0.log" Nov 25 11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.607209 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-rq74j_51c8c848-1585-4632-9e36-f39a0f5a7074/registry-server/0.log" Nov 25 11:26:15 crc kubenswrapper[4854]: I1125 11:26:15.974564 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5kx2t_260fd413-a6c2-4ae1-9dfa-d3bceccfd722/extract-utilities/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.262613 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5kx2t_260fd413-a6c2-4ae1-9dfa-d3bceccfd722/extract-utilities/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.301074 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5kx2t_260fd413-a6c2-4ae1-9dfa-d3bceccfd722/extract-content/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.349375 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5kx2t_260fd413-a6c2-4ae1-9dfa-d3bceccfd722/extract-content/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.501250 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5kx2t_260fd413-a6c2-4ae1-9dfa-d3bceccfd722/extract-utilities/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.598373 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57_53ec1a11-25b3-41de-bde3-7c4d63dc5ce5/util/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.608494 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5kx2t_260fd413-a6c2-4ae1-9dfa-d3bceccfd722/extract-content/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.889521 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57_53ec1a11-25b3-41de-bde3-7c4d63dc5ce5/pull/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.909227 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57_53ec1a11-25b3-41de-bde3-7c4d63dc5ce5/util/0.log" Nov 25 11:26:16 crc kubenswrapper[4854]: I1125 11:26:16.914734 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57_53ec1a11-25b3-41de-bde3-7c4d63dc5ce5/pull/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.172734 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57_53ec1a11-25b3-41de-bde3-7c4d63dc5ce5/util/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.177920 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57_53ec1a11-25b3-41de-bde3-7c4d63dc5ce5/pull/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.243852 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69fl57_53ec1a11-25b3-41de-bde3-7c4d63dc5ce5/extract/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.465132 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-glqlm_a6450a14-7c2a-4c3f-8861-876374e8cb9a/marketplace-operator/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.473636 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5kx2t_260fd413-a6c2-4ae1-9dfa-d3bceccfd722/registry-server/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.577605 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzqj2_8ee67274-c034-4307-a53e-2655baa2d521/extract-utilities/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.725566 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzqj2_8ee67274-c034-4307-a53e-2655baa2d521/extract-utilities/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.729271 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzqj2_8ee67274-c034-4307-a53e-2655baa2d521/extract-content/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.809556 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzqj2_8ee67274-c034-4307-a53e-2655baa2d521/extract-content/0.log" Nov 25 11:26:17 crc kubenswrapper[4854]: I1125 11:26:17.943019 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzqj2_8ee67274-c034-4307-a53e-2655baa2d521/extract-content/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.011002 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzqj2_8ee67274-c034-4307-a53e-2655baa2d521/extract-utilities/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.017078 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw9x_8c436160-4490-4509-9eb3-1cf22c505e0a/extract-utilities/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.243195 4854 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-pqw9x_8c436160-4490-4509-9eb3-1cf22c505e0a/extract-utilities/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.251102 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw9x_8c436160-4490-4509-9eb3-1cf22c505e0a/extract-content/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.349944 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzqj2_8ee67274-c034-4307-a53e-2655baa2d521/registry-server/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.351241 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw9x_8c436160-4490-4509-9eb3-1cf22c505e0a/extract-content/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.508830 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw9x_8c436160-4490-4509-9eb3-1cf22c505e0a/extract-content/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.531412 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw9x_8c436160-4490-4509-9eb3-1cf22c505e0a/extract-utilities/0.log" Nov 25 11:26:18 crc kubenswrapper[4854]: I1125 11:26:18.904936 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw9x_8c436160-4490-4509-9eb3-1cf22c505e0a/registry-server/0.log" Nov 25 11:26:24 crc kubenswrapper[4854]: I1125 11:26:24.013906 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:26:24 crc kubenswrapper[4854]: E1125 11:26:24.014733 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:26:32 crc kubenswrapper[4854]: I1125 11:26:32.342381 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-pt9cp_a79ee517-3034-49c0-98d1-a547d6f27e4c/prometheus-operator/0.log" Nov 25 11:26:32 crc kubenswrapper[4854]: I1125 11:26:32.569318 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-779d668dff-4pmqd_7a14e79c-e6de-4e5a-b016-5dad4e2baecb/prometheus-operator-admission-webhook/0.log" Nov 25 11:26:32 crc kubenswrapper[4854]: I1125 11:26:32.577081 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-779d668dff-vklqz_669f6ca8-f193-423b-91c3-ae56039ee589/prometheus-operator-admission-webhook/0.log" Nov 25 11:26:32 crc kubenswrapper[4854]: I1125 11:26:32.751826 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-c6npv_a5aeba85-24cb-48ef-9050-93a40b4d67f0/observability-ui-dashboards/0.log" Nov 25 11:26:32 crc kubenswrapper[4854]: I1125 11:26:32.803614 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-k2d6w_80bbdac7-392c-4568-9519-45ce6747e77c/operator/0.log" Nov 25 11:26:32 crc 
kubenswrapper[4854]: I1125 11:26:32.961744 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-djf5f_d54da2d3-0328-4498-a150-7a12963c3d43/perses-operator/0.log" Nov 25 11:26:36 crc kubenswrapper[4854]: I1125 11:26:36.013487 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:26:36 crc kubenswrapper[4854]: E1125 11:26:36.014184 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.423431 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ssfdz"] Nov 25 11:26:39 crc kubenswrapper[4854]: E1125 11:26:39.424417 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="registry-server" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.424430 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="registry-server" Nov 25 11:26:39 crc kubenswrapper[4854]: E1125 11:26:39.424476 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="extract-utilities" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.424482 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="extract-utilities" Nov 25 11:26:39 crc kubenswrapper[4854]: E1125 11:26:39.424498 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="extract-content" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.424504 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="extract-content" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.424751 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="30234f04-abcc-479e-96fc-40bfbce02d59" containerName="registry-server" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.467462 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssfdz"] Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.467565 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.523235 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-utilities\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.523361 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-catalog-content\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.523461 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsqnj\" (UniqueName: \"kubernetes.io/projected/24759a3f-40c9-49c8-b29a-3ae94a21290d-kube-api-access-tsqnj\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.626060 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-utilities\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.626444 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-catalog-content\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.626537 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-utilities\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.626783 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-catalog-content\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.626892 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsqnj\" (UniqueName: \"kubernetes.io/projected/24759a3f-40c9-49c8-b29a-3ae94a21290d-kube-api-access-tsqnj\") pod \"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.663129 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsqnj\" (UniqueName: \"kubernetes.io/projected/24759a3f-40c9-49c8-b29a-3ae94a21290d-kube-api-access-tsqnj\") pod 
\"redhat-marketplace-ssfdz\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:39 crc kubenswrapper[4854]: I1125 11:26:39.800241 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:40 crc kubenswrapper[4854]: I1125 11:26:40.310543 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssfdz"] Nov 25 11:26:40 crc kubenswrapper[4854]: I1125 11:26:40.752020 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerStarted","Data":"980fc8eced763e1185f78da373a5cf4d2322daec15b4ef5867b6f7a70ec10423"} Nov 25 11:26:40 crc kubenswrapper[4854]: I1125 11:26:40.753338 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerStarted","Data":"a1aa575945279d19e7e5bf9a02e721f19f8b866c34e0833163a636c5c4e33d3e"} Nov 25 11:26:41 crc kubenswrapper[4854]: I1125 11:26:41.767479 4854 generic.go:334] "Generic (PLEG): container finished" podID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerID="980fc8eced763e1185f78da373a5cf4d2322daec15b4ef5867b6f7a70ec10423" exitCode=0 Nov 25 11:26:41 crc kubenswrapper[4854]: I1125 11:26:41.767534 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerDied","Data":"980fc8eced763e1185f78da373a5cf4d2322daec15b4ef5867b6f7a70ec10423"} Nov 25 11:26:44 crc kubenswrapper[4854]: I1125 11:26:44.805091 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerStarted","Data":"1e1885cb3ce84000b52e26a007be1e477870813b6b02d51dece8e489ea5f2cb7"} Nov 25 11:26:45 crc kubenswrapper[4854]: I1125 11:26:45.815971 4854 generic.go:334] "Generic (PLEG): container finished" podID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerID="1e1885cb3ce84000b52e26a007be1e477870813b6b02d51dece8e489ea5f2cb7" exitCode=0 Nov 25 11:26:45 crc kubenswrapper[4854]: I1125 11:26:45.816014 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerDied","Data":"1e1885cb3ce84000b52e26a007be1e477870813b6b02d51dece8e489ea5f2cb7"} Nov 25 11:26:47 crc kubenswrapper[4854]: I1125 11:26:47.036394 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:26:47 crc kubenswrapper[4854]: E1125 11:26:47.037302 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:26:47 crc kubenswrapper[4854]: I1125 11:26:47.838312 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" 
event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerStarted","Data":"44085ba89f6406c03939dee23e9492d180d5f20e98b3d1779870bec92c96dcfb"} Nov 25 11:26:48 crc kubenswrapper[4854]: I1125 11:26:48.047008 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-858c6c7dc8-bdkz2_f41b420b-a51d-40b7-9a74-5930db508da8/kube-rbac-proxy/0.log" Nov 25 11:26:48 crc kubenswrapper[4854]: I1125 11:26:48.087571 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-858c6c7dc8-bdkz2_f41b420b-a51d-40b7-9a74-5930db508da8/manager/0.log" Nov 25 11:26:48 crc kubenswrapper[4854]: I1125 11:26:48.880566 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ssfdz" podStartSLOduration=4.278520187 podStartE2EDuration="9.880544031s" podCreationTimestamp="2025-11-25 11:26:39 +0000 UTC" firstStartedPulling="2025-11-25 11:26:41.771185055 +0000 UTC m=+6607.624178441" lastFinishedPulling="2025-11-25 11:26:47.373208909 +0000 UTC m=+6613.226202285" observedRunningTime="2025-11-25 11:26:48.873520051 +0000 UTC m=+6614.726513437" watchObservedRunningTime="2025-11-25 11:26:48.880544031 +0000 UTC m=+6614.733537417" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.573017 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l9xtc"] Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.575548 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.584050 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l9xtc"] Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.690504 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-catalog-content\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.691003 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z962\" (UniqueName: \"kubernetes.io/projected/889e88b1-8383-4bcd-82c5-8d0ce734518d-kube-api-access-5z962\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.691062 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-utilities\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.793433 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-catalog-content\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.793696 4854 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z962\" (UniqueName: \"kubernetes.io/projected/889e88b1-8383-4bcd-82c5-8d0ce734518d-kube-api-access-5z962\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.794187 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-catalog-content\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.794213 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-utilities\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.794757 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-utilities\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.800648 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.800724 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.821791 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z962\" (UniqueName: \"kubernetes.io/projected/889e88b1-8383-4bcd-82c5-8d0ce734518d-kube-api-access-5z962\") pod \"community-operators-l9xtc\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.870573 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:26:49 crc kubenswrapper[4854]: I1125 11:26:49.939095 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:26:50 crc kubenswrapper[4854]: I1125 11:26:50.471401 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l9xtc"] Nov 25 11:26:50 crc kubenswrapper[4854]: W1125 11:26:50.484500 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod889e88b1_8383_4bcd_82c5_8d0ce734518d.slice/crio-d91156b0dcff2b24fbe33512b73e9abe14284a5335f38d550a1a506943852893 WatchSource:0}: Error finding container d91156b0dcff2b24fbe33512b73e9abe14284a5335f38d550a1a506943852893: Status 404 returned error can't find the container with id d91156b0dcff2b24fbe33512b73e9abe14284a5335f38d550a1a506943852893 Nov 25 11:26:50 crc kubenswrapper[4854]: I1125 11:26:50.881367 4854 generic.go:334] "Generic (PLEG): container finished" podID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerID="1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd" exitCode=0 Nov 25 11:26:50 crc kubenswrapper[4854]: I1125 11:26:50.881457 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9xtc" event={"ID":"889e88b1-8383-4bcd-82c5-8d0ce734518d","Type":"ContainerDied","Data":"1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd"} Nov 25 11:26:50 crc kubenswrapper[4854]: I1125 11:26:50.882166 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9xtc" event={"ID":"889e88b1-8383-4bcd-82c5-8d0ce734518d","Type":"ContainerStarted","Data":"d91156b0dcff2b24fbe33512b73e9abe14284a5335f38d550a1a506943852893"} Nov 25 11:26:57 crc kubenswrapper[4854]: I1125 11:26:57.970481 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9xtc" event={"ID":"889e88b1-8383-4bcd-82c5-8d0ce734518d","Type":"ContainerStarted","Data":"3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4"} Nov 25 11:26:58 crc kubenswrapper[4854]: I1125 11:26:58.026400 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:26:58 crc kubenswrapper[4854]: E1125 11:26:58.026799 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:26:59 crc kubenswrapper[4854]: I1125 11:26:59.857977 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:27:00 crc kubenswrapper[4854]: I1125 11:27:00.010231 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssfdz"] Nov 25 11:27:00 crc kubenswrapper[4854]: I1125 11:27:00.010471 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ssfdz" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="registry-server" containerID="cri-o://44085ba89f6406c03939dee23e9492d180d5f20e98b3d1779870bec92c96dcfb" gracePeriod=2 Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.032251 4854 generic.go:334] "Generic (PLEG): container finished" 
podID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerID="44085ba89f6406c03939dee23e9492d180d5f20e98b3d1779870bec92c96dcfb" exitCode=0 Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.032608 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerDied","Data":"44085ba89f6406c03939dee23e9492d180d5f20e98b3d1779870bec92c96dcfb"} Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.696701 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.760401 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-utilities\") pod \"24759a3f-40c9-49c8-b29a-3ae94a21290d\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.761452 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsqnj\" (UniqueName: \"kubernetes.io/projected/24759a3f-40c9-49c8-b29a-3ae94a21290d-kube-api-access-tsqnj\") pod \"24759a3f-40c9-49c8-b29a-3ae94a21290d\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.761154 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-utilities" (OuterVolumeSpecName: "utilities") pod "24759a3f-40c9-49c8-b29a-3ae94a21290d" (UID: "24759a3f-40c9-49c8-b29a-3ae94a21290d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.761695 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-catalog-content\") pod \"24759a3f-40c9-49c8-b29a-3ae94a21290d\" (UID: \"24759a3f-40c9-49c8-b29a-3ae94a21290d\") " Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.763049 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.794964 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24759a3f-40c9-49c8-b29a-3ae94a21290d-kube-api-access-tsqnj" (OuterVolumeSpecName: "kube-api-access-tsqnj") pod "24759a3f-40c9-49c8-b29a-3ae94a21290d" (UID: "24759a3f-40c9-49c8-b29a-3ae94a21290d"). InnerVolumeSpecName "kube-api-access-tsqnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.822065 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24759a3f-40c9-49c8-b29a-3ae94a21290d" (UID: "24759a3f-40c9-49c8-b29a-3ae94a21290d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.867579 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24759a3f-40c9-49c8-b29a-3ae94a21290d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:27:02 crc kubenswrapper[4854]: I1125 11:27:02.867620 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsqnj\" (UniqueName: \"kubernetes.io/projected/24759a3f-40c9-49c8-b29a-3ae94a21290d-kube-api-access-tsqnj\") on node \"crc\" DevicePath \"\"" Nov 25 11:27:03 crc kubenswrapper[4854]: I1125 11:27:03.101398 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ssfdz" Nov 25 11:27:03 crc kubenswrapper[4854]: I1125 11:27:03.196745 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ssfdz" event={"ID":"24759a3f-40c9-49c8-b29a-3ae94a21290d","Type":"ContainerDied","Data":"a1aa575945279d19e7e5bf9a02e721f19f8b866c34e0833163a636c5c4e33d3e"} Nov 25 11:27:03 crc kubenswrapper[4854]: I1125 11:27:03.196808 4854 scope.go:117] "RemoveContainer" containerID="44085ba89f6406c03939dee23e9492d180d5f20e98b3d1779870bec92c96dcfb" Nov 25 11:27:03 crc kubenswrapper[4854]: I1125 11:27:03.222307 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssfdz"] Nov 25 11:27:03 crc kubenswrapper[4854]: I1125 11:27:03.235560 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ssfdz"] Nov 25 11:27:03 crc kubenswrapper[4854]: I1125 11:27:03.281100 4854 scope.go:117] "RemoveContainer" containerID="1e1885cb3ce84000b52e26a007be1e477870813b6b02d51dece8e489ea5f2cb7" Nov 25 11:27:03 crc kubenswrapper[4854]: I1125 11:27:03.311620 4854 scope.go:117] "RemoveContainer" containerID="980fc8eced763e1185f78da373a5cf4d2322daec15b4ef5867b6f7a70ec10423" Nov 25 11:27:04 crc kubenswrapper[4854]: I1125 11:27:04.121071 4854 generic.go:334] "Generic (PLEG): container finished" podID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerID="3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4" exitCode=0 Nov 25 11:27:04 crc kubenswrapper[4854]: I1125 11:27:04.121292 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9xtc" event={"ID":"889e88b1-8383-4bcd-82c5-8d0ce734518d","Type":"ContainerDied","Data":"3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4"} Nov 25 11:27:05 crc kubenswrapper[4854]: I1125 11:27:05.033054 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" path="/var/lib/kubelet/pods/24759a3f-40c9-49c8-b29a-3ae94a21290d/volumes" Nov 25 11:27:07 crc kubenswrapper[4854]: I1125 11:27:07.156089 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9xtc" event={"ID":"889e88b1-8383-4bcd-82c5-8d0ce734518d","Type":"ContainerStarted","Data":"9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7"} Nov 25 11:27:09 crc kubenswrapper[4854]: I1125 11:27:09.939769 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:27:09 crc kubenswrapper[4854]: I1125 11:27:09.940373 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:27:11 
crc kubenswrapper[4854]: I1125 11:27:11.012486 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-l9xtc" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:27:11 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:27:11 crc kubenswrapper[4854]: > Nov 25 11:27:12 crc kubenswrapper[4854]: I1125 11:27:12.013912 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:27:12 crc kubenswrapper[4854]: E1125 11:27:12.014392 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:27:21 crc kubenswrapper[4854]: I1125 11:27:21.006246 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-l9xtc" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:27:21 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:27:21 crc kubenswrapper[4854]: > Nov 25 11:27:27 crc kubenswrapper[4854]: I1125 11:27:27.013618 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:27:27 crc kubenswrapper[4854]: E1125 11:27:27.014437 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:27:31 crc kubenswrapper[4854]: I1125 11:27:31.008938 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-l9xtc" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:27:31 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:27:31 crc kubenswrapper[4854]: > Nov 25 11:27:41 crc kubenswrapper[4854]: I1125 11:27:41.023337 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:27:41 crc kubenswrapper[4854]: I1125 11:27:41.043163 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-l9xtc" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:27:41 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s Nov 25 11:27:41 crc kubenswrapper[4854]: > Nov 25 11:27:41 crc kubenswrapper[4854]: E1125 11:27:41.058786 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:27:50 crc kubenswrapper[4854]: I1125 11:27:50.002093 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:27:50 crc kubenswrapper[4854]: I1125 11:27:50.024357 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l9xtc" podStartSLOduration=45.934853346 podStartE2EDuration="1m1.024337074s" podCreationTimestamp="2025-11-25 11:26:49 +0000 UTC" firstStartedPulling="2025-11-25 11:26:50.88342263 +0000 UTC m=+6616.736416006" lastFinishedPulling="2025-11-25 11:27:05.972906358 +0000 UTC m=+6631.825899734" observedRunningTime="2025-11-25 11:27:07.181042765 +0000 UTC m=+6633.034036161" watchObservedRunningTime="2025-11-25 11:27:50.024337074 +0000 UTC m=+6675.877330450" Nov 25 11:27:50 crc kubenswrapper[4854]: I1125 11:27:50.059869 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:27:50 crc kubenswrapper[4854]: I1125 11:27:50.812305 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l9xtc"] Nov 25 11:27:51 crc kubenswrapper[4854]: I1125 11:27:51.725641 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l9xtc" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" containerID="cri-o://9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7" gracePeriod=2 Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.577229 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.621506 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-catalog-content\") pod \"889e88b1-8383-4bcd-82c5-8d0ce734518d\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.621701 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-utilities\") pod \"889e88b1-8383-4bcd-82c5-8d0ce734518d\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.621793 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5z962\" (UniqueName: \"kubernetes.io/projected/889e88b1-8383-4bcd-82c5-8d0ce734518d-kube-api-access-5z962\") pod \"889e88b1-8383-4bcd-82c5-8d0ce734518d\" (UID: \"889e88b1-8383-4bcd-82c5-8d0ce734518d\") " Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.622772 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-utilities" (OuterVolumeSpecName: "utilities") pod "889e88b1-8383-4bcd-82c5-8d0ce734518d" (UID: "889e88b1-8383-4bcd-82c5-8d0ce734518d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.669490 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/889e88b1-8383-4bcd-82c5-8d0ce734518d-kube-api-access-5z962" (OuterVolumeSpecName: "kube-api-access-5z962") pod "889e88b1-8383-4bcd-82c5-8d0ce734518d" (UID: "889e88b1-8383-4bcd-82c5-8d0ce734518d"). InnerVolumeSpecName "kube-api-access-5z962". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.725014 4854 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.725090 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5z962\" (UniqueName: \"kubernetes.io/projected/889e88b1-8383-4bcd-82c5-8d0ce734518d-kube-api-access-5z962\") on node \"crc\" DevicePath \"\"" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.737584 4854 generic.go:334] "Generic (PLEG): container finished" podID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerID="9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7" exitCode=0 Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.737634 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9xtc" event={"ID":"889e88b1-8383-4bcd-82c5-8d0ce734518d","Type":"ContainerDied","Data":"9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7"} Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.737702 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l9xtc" event={"ID":"889e88b1-8383-4bcd-82c5-8d0ce734518d","Type":"ContainerDied","Data":"d91156b0dcff2b24fbe33512b73e9abe14284a5335f38d550a1a506943852893"} Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.737726 4854 scope.go:117] "RemoveContainer" containerID="9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.738772 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l9xtc" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.770546 4854 scope.go:117] "RemoveContainer" containerID="3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.781495 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "889e88b1-8383-4bcd-82c5-8d0ce734518d" (UID: "889e88b1-8383-4bcd-82c5-8d0ce734518d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.799976 4854 scope.go:117] "RemoveContainer" containerID="1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.827155 4854 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889e88b1-8383-4bcd-82c5-8d0ce734518d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.875361 4854 scope.go:117] "RemoveContainer" containerID="9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7" Nov 25 11:27:52 crc kubenswrapper[4854]: E1125 11:27:52.877472 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7\": container with ID starting with 9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7 not found: ID does not exist" containerID="9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.877577 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7"} err="failed to get container status \"9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7\": rpc error: code = NotFound desc = could not find container \"9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7\": container with ID starting with 9df8d18453b9c8257b0ff4dbaba311184101ef9b50481b8c660d6fb8f601f0a7 not found: ID does not exist" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.877648 4854 scope.go:117] "RemoveContainer" containerID="3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4" Nov 25 11:27:52 crc kubenswrapper[4854]: E1125 11:27:52.878257 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4\": container with ID starting with 3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4 not found: ID does not exist" containerID="3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.878287 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4"} err="failed to get container status \"3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4\": rpc error: code = NotFound desc = could not find container \"3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4\": container with ID starting with 3b9ecd42e3f9d1f04980e1fae76bd7cedd6e3b194ff8b96c29aefba03809bef4 not found: ID does not exist" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.878305 4854 scope.go:117] "RemoveContainer" containerID="1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd" Nov 25 11:27:52 crc kubenswrapper[4854]: E1125 11:27:52.878822 4854 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd\": container with ID starting with 1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd not found: ID does not exist" 
containerID="1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd" Nov 25 11:27:52 crc kubenswrapper[4854]: I1125 11:27:52.878852 4854 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd"} err="failed to get container status \"1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd\": rpc error: code = NotFound desc = could not find container \"1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd\": container with ID starting with 1c3377006538fe48eb1957e07541378a9fb2e5729796ae82b5ffbf9bd314aacd not found: ID does not exist" Nov 25 11:27:53 crc kubenswrapper[4854]: I1125 11:27:53.071913 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l9xtc"] Nov 25 11:27:53 crc kubenswrapper[4854]: I1125 11:27:53.083509 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l9xtc"] Nov 25 11:27:55 crc kubenswrapper[4854]: I1125 11:27:55.023176 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:27:55 crc kubenswrapper[4854]: E1125 11:27:55.023856 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:27:55 crc kubenswrapper[4854]: I1125 11:27:55.034414 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" path="/var/lib/kubelet/pods/889e88b1-8383-4bcd-82c5-8d0ce734518d/volumes" Nov 25 11:28:08 crc kubenswrapper[4854]: I1125 11:28:08.013954 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:28:08 crc kubenswrapper[4854]: E1125 11:28:08.014825 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:28:19 crc kubenswrapper[4854]: I1125 11:28:19.013200 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:28:19 crc kubenswrapper[4854]: E1125 11:28:19.013942 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:28:30 crc kubenswrapper[4854]: I1125 11:28:30.013111 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:28:30 crc kubenswrapper[4854]: E1125 11:28:30.013905 4854 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:28:43 crc kubenswrapper[4854]: I1125 11:28:43.017003 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:28:43 crc kubenswrapper[4854]: E1125 11:28:43.017922 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:28:55 crc kubenswrapper[4854]: I1125 11:28:55.023982 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:28:55 crc kubenswrapper[4854]: E1125 11:28:55.024946 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:29:01 crc kubenswrapper[4854]: I1125 11:29:01.912657 4854 scope.go:117] "RemoveContainer" containerID="e1520995846f9712e3620f8aa78ebce8cef85ce00f5f8c611ab33c18fdffd6ac" Nov 25 11:29:08 crc kubenswrapper[4854]: I1125 11:29:08.014953 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:29:08 crc kubenswrapper[4854]: E1125 11:29:08.015979 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:29:20 crc kubenswrapper[4854]: I1125 11:29:20.014557 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:29:20 crc kubenswrapper[4854]: E1125 11:29:20.015307 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:29:21 crc kubenswrapper[4854]: I1125 11:29:21.808016 4854 generic.go:334] "Generic (PLEG): container finished" podID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerID="a013b66ee810706a53bd3909c1fcde5809bf827d72b073743e6d84143ec09036" exitCode=0 Nov 25 11:29:21 crc 
kubenswrapper[4854]: I1125 11:29:21.808335 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sm8s5/must-gather-5468f" event={"ID":"afbe3ed1-5005-490f-a465-9a711e8fa24e","Type":"ContainerDied","Data":"a013b66ee810706a53bd3909c1fcde5809bf827d72b073743e6d84143ec09036"} Nov 25 11:29:21 crc kubenswrapper[4854]: I1125 11:29:21.809245 4854 scope.go:117] "RemoveContainer" containerID="a013b66ee810706a53bd3909c1fcde5809bf827d72b073743e6d84143ec09036" Nov 25 11:29:23 crc kubenswrapper[4854]: I1125 11:29:23.065035 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sm8s5_must-gather-5468f_afbe3ed1-5005-490f-a465-9a711e8fa24e/gather/0.log" Nov 25 11:29:32 crc kubenswrapper[4854]: I1125 11:29:32.649593 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sm8s5/must-gather-5468f"] Nov 25 11:29:32 crc kubenswrapper[4854]: I1125 11:29:32.650616 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-sm8s5/must-gather-5468f" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerName="copy" containerID="cri-o://ec9f8de1c095957e6fa5e8b51b16c8c87790218f976c18170ed8ec55a7119ffa" gracePeriod=2 Nov 25 11:29:32 crc kubenswrapper[4854]: I1125 11:29:32.671568 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sm8s5/must-gather-5468f"] Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.041700 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sm8s5_must-gather-5468f_afbe3ed1-5005-490f-a465-9a711e8fa24e/copy/0.log" Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.043519 4854 generic.go:334] "Generic (PLEG): container finished" podID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerID="ec9f8de1c095957e6fa5e8b51b16c8c87790218f976c18170ed8ec55a7119ffa" exitCode=143 Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.304522 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sm8s5_must-gather-5468f_afbe3ed1-5005-490f-a465-9a711e8fa24e/copy/0.log" Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.305048 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.364741 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clf8j\" (UniqueName: \"kubernetes.io/projected/afbe3ed1-5005-490f-a465-9a711e8fa24e-kube-api-access-clf8j\") pod \"afbe3ed1-5005-490f-a465-9a711e8fa24e\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.365225 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/afbe3ed1-5005-490f-a465-9a711e8fa24e-must-gather-output\") pod \"afbe3ed1-5005-490f-a465-9a711e8fa24e\" (UID: \"afbe3ed1-5005-490f-a465-9a711e8fa24e\") " Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.381073 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afbe3ed1-5005-490f-a465-9a711e8fa24e-kube-api-access-clf8j" (OuterVolumeSpecName: "kube-api-access-clf8j") pod "afbe3ed1-5005-490f-a465-9a711e8fa24e" (UID: "afbe3ed1-5005-490f-a465-9a711e8fa24e"). InnerVolumeSpecName "kube-api-access-clf8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.468135 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clf8j\" (UniqueName: \"kubernetes.io/projected/afbe3ed1-5005-490f-a465-9a711e8fa24e-kube-api-access-clf8j\") on node \"crc\" DevicePath \"\"" Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.621407 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afbe3ed1-5005-490f-a465-9a711e8fa24e-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "afbe3ed1-5005-490f-a465-9a711e8fa24e" (UID: "afbe3ed1-5005-490f-a465-9a711e8fa24e"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:29:33 crc kubenswrapper[4854]: I1125 11:29:33.673481 4854 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/afbe3ed1-5005-490f-a465-9a711e8fa24e-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 11:29:34 crc kubenswrapper[4854]: I1125 11:29:34.061634 4854 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sm8s5_must-gather-5468f_afbe3ed1-5005-490f-a465-9a711e8fa24e/copy/0.log" Nov 25 11:29:34 crc kubenswrapper[4854]: I1125 11:29:34.062645 4854 scope.go:117] "RemoveContainer" containerID="ec9f8de1c095957e6fa5e8b51b16c8c87790218f976c18170ed8ec55a7119ffa" Nov 25 11:29:34 crc kubenswrapper[4854]: I1125 11:29:34.062661 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sm8s5/must-gather-5468f" Nov 25 11:29:34 crc kubenswrapper[4854]: I1125 11:29:34.138863 4854 scope.go:117] "RemoveContainer" containerID="a013b66ee810706a53bd3909c1fcde5809bf827d72b073743e6d84143ec09036" Nov 25 11:29:35 crc kubenswrapper[4854]: I1125 11:29:35.156595 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:29:35 crc kubenswrapper[4854]: E1125 11:29:35.157187 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:29:35 crc kubenswrapper[4854]: I1125 11:29:35.157460 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" path="/var/lib/kubelet/pods/afbe3ed1-5005-490f-a465-9a711e8fa24e/volumes" Nov 25 11:29:46 crc kubenswrapper[4854]: I1125 11:29:46.014087 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:29:46 crc kubenswrapper[4854]: E1125 11:29:46.015202 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.013662 4854 scope.go:117] "RemoveContainer" 
containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.014828 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.205990 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh"] Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.206515 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="extract-content" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206543 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="extract-content" Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.206582 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="extract-utilities" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206592 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="extract-utilities" Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.206623 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="extract-content" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206634 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="extract-content" Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.206652 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerName="gather" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206660 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerName="gather" Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.206690 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206698 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.206728 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerName="copy" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206734 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerName="copy" Nov 25 11:30:00 crc kubenswrapper[4854]: E1125 11:30:00.206745 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="extract-utilities" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206750 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="extract-utilities" Nov 25 11:30:00 crc kubenswrapper[4854]: 
E1125 11:30:00.206762 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="registry-server" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.206769 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="registry-server" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.207031 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="889e88b1-8383-4bcd-82c5-8d0ce734518d" containerName="registry-server" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.207068 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="24759a3f-40c9-49c8-b29a-3ae94a21290d" containerName="registry-server" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.207095 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerName="copy" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.207112 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="afbe3ed1-5005-490f-a465-9a711e8fa24e" containerName="gather" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.208573 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.222761 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh"] Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.231797 4854 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.231807 4854 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.289992 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-secret-volume\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.290080 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-config-volume\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.290147 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p76w\" (UniqueName: \"kubernetes.io/projected/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-kube-api-access-9p76w\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.393147 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p76w\" (UniqueName: 
\"kubernetes.io/projected/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-kube-api-access-9p76w\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.393731 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-secret-volume\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.393794 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-config-volume\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.394890 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-config-volume\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.400943 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-secret-volume\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.410815 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p76w\" (UniqueName: \"kubernetes.io/projected/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-kube-api-access-9p76w\") pod \"collect-profiles-29401170-8hpwh\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:00 crc kubenswrapper[4854]: I1125 11:30:00.536581 4854 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:01 crc kubenswrapper[4854]: I1125 11:30:01.106584 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh"] Nov 25 11:30:01 crc kubenswrapper[4854]: W1125 11:30:01.122794 4854 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f5c9b18_986f_4bb1_a1c1_3bc53afcc0be.slice/crio-c4adf0af4d9920d6019e64657fa7aab49f05150d46b9cd88ad8bfaafb86e2be4 WatchSource:0}: Error finding container c4adf0af4d9920d6019e64657fa7aab49f05150d46b9cd88ad8bfaafb86e2be4: Status 404 returned error can't find the container with id c4adf0af4d9920d6019e64657fa7aab49f05150d46b9cd88ad8bfaafb86e2be4 Nov 25 11:30:01 crc kubenswrapper[4854]: I1125 11:30:01.388955 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" event={"ID":"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be","Type":"ContainerStarted","Data":"c4adf0af4d9920d6019e64657fa7aab49f05150d46b9cd88ad8bfaafb86e2be4"} Nov 25 11:30:02 crc kubenswrapper[4854]: I1125 11:30:02.421259 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" event={"ID":"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be","Type":"ContainerStarted","Data":"c0b83df5e35e619d116fc445195950987c73991dc14deaf2212e98a288147b8c"} Nov 25 11:30:02 crc kubenswrapper[4854]: I1125 11:30:02.455851 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" podStartSLOduration=2.455830219 podStartE2EDuration="2.455830219s" podCreationTimestamp="2025-11-25 11:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:30:02.445078837 +0000 UTC m=+6808.298072213" watchObservedRunningTime="2025-11-25 11:30:02.455830219 +0000 UTC m=+6808.308823595" Nov 25 11:30:03 crc kubenswrapper[4854]: I1125 11:30:03.438019 4854 generic.go:334] "Generic (PLEG): container finished" podID="7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be" containerID="c0b83df5e35e619d116fc445195950987c73991dc14deaf2212e98a288147b8c" exitCode=0 Nov 25 11:30:03 crc kubenswrapper[4854]: I1125 11:30:03.438328 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" event={"ID":"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be","Type":"ContainerDied","Data":"c0b83df5e35e619d116fc445195950987c73991dc14deaf2212e98a288147b8c"} Nov 25 11:30:04 crc kubenswrapper[4854]: I1125 11:30:04.923078 4854 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.013262 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-secret-volume\") pod \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.013370 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p76w\" (UniqueName: \"kubernetes.io/projected/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-kube-api-access-9p76w\") pod \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.013435 4854 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-config-volume\") pod \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\" (UID: \"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be\") " Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.015172 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-config-volume" (OuterVolumeSpecName: "config-volume") pod "7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be" (UID: "7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.022209 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-kube-api-access-9p76w" (OuterVolumeSpecName: "kube-api-access-9p76w") pod "7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be" (UID: "7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be"). InnerVolumeSpecName "kube-api-access-9p76w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.028701 4854 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be" (UID: "7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.119770 4854 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.119819 4854 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p76w\" (UniqueName: \"kubernetes.io/projected/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-kube-api-access-9p76w\") on node \"crc\" DevicePath \"\"" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.119833 4854 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.459776 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" event={"ID":"7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be","Type":"ContainerDied","Data":"c4adf0af4d9920d6019e64657fa7aab49f05150d46b9cd88ad8bfaafb86e2be4"} Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.459824 4854 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4adf0af4d9920d6019e64657fa7aab49f05150d46b9cd88ad8bfaafb86e2be4" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.459892 4854 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401170-8hpwh" Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.573067 4854 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp"] Nov 25 11:30:05 crc kubenswrapper[4854]: I1125 11:30:05.585526 4854 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-6hftp"] Nov 25 11:30:07 crc kubenswrapper[4854]: I1125 11:30:07.049075 4854 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a" path="/var/lib/kubelet/pods/2b3ba2fb-2c4f-4ed6-a62f-abd9c752387a/volumes" Nov 25 11:30:15 crc kubenswrapper[4854]: I1125 11:30:15.023597 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:30:15 crc kubenswrapper[4854]: E1125 11:30:15.024701 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:30:29 crc kubenswrapper[4854]: I1125 11:30:29.015272 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:30:29 crc kubenswrapper[4854]: E1125 11:30:29.019665 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:30:44 crc kubenswrapper[4854]: I1125 11:30:44.013928 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:30:44 crc kubenswrapper[4854]: E1125 11:30:44.014934 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9qdk4_openshift-machine-config-operator(aa43fdf8-0726-4b6e-bbda-2ac604e9eee0)\"" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" Nov 25 11:30:59 crc kubenswrapper[4854]: I1125 11:30:59.013658 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909" Nov 25 11:31:00 crc kubenswrapper[4854]: I1125 11:31:00.846259 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"d4629ad5cbfca6a18ad0fad79f579ee7d73371be636ce1cd84f7058db1bc6dc8"} Nov 25 11:31:02 crc kubenswrapper[4854]: I1125 11:31:02.037221 4854 scope.go:117] "RemoveContainer" containerID="4c50fd5b1ef7224513e87fdd8542a92637bec70acd5fb40fcf2ed26b9675dd04" Nov 25 11:33:25 crc kubenswrapper[4854]: I1125 11:33:25.029283 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:33:25 crc kubenswrapper[4854]: I1125 11:33:25.030346 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.029087 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.029626 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.700387 4854 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mmf6c"] Nov 25 11:33:55 crc kubenswrapper[4854]: E1125 11:33:55.701880 4854 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be" containerName="collect-profiles" Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.701916 4854 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be" containerName="collect-profiles" Nov 25 
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.702738 4854 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f5c9b18-986f-4bb1-a1c1-3bc53afcc0be" containerName="collect-profiles"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.710183 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.764821 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-utilities\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.764884 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-catalog-content\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.765047 4854 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh54d\" (UniqueName: \"kubernetes.io/projected/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-kube-api-access-jh54d\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.803491 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mmf6c"]
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.868305 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh54d\" (UniqueName: \"kubernetes.io/projected/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-kube-api-access-jh54d\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.868531 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-utilities\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.868563 4854 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-catalog-content\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.869029 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-catalog-content\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.869128 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-utilities\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:55 crc kubenswrapper[4854]: I1125 11:33:55.908696 4854 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh54d\" (UniqueName: \"kubernetes.io/projected/f2033d4a-e7a4-4578-a114-96b1b6f2c4e0-kube-api-access-jh54d\") pod \"redhat-operators-mmf6c\" (UID: \"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0\") " pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:56 crc kubenswrapper[4854]: I1125 11:33:56.041054 4854 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:33:56 crc kubenswrapper[4854]: I1125 11:33:56.915971 4854 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mmf6c"]
Nov 25 11:33:57 crc kubenswrapper[4854]: I1125 11:33:57.000663 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mmf6c" event={"ID":"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0","Type":"ContainerStarted","Data":"4423f005dd6608fd5e3bce6d4b3f2ccb69b6e5417e7278cfa03611e0748771b3"}
Nov 25 11:33:58 crc kubenswrapper[4854]: I1125 11:33:58.015512 4854 generic.go:334] "Generic (PLEG): container finished" podID="f2033d4a-e7a4-4578-a114-96b1b6f2c4e0" containerID="7db586b24d76c602e6cae7738a02185fd74a4c0da42d57c912bf4afb3aa302f8" exitCode=0
Nov 25 11:33:58 crc kubenswrapper[4854]: I1125 11:33:58.015592 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mmf6c" event={"ID":"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0","Type":"ContainerDied","Data":"7db586b24d76c602e6cae7738a02185fd74a4c0da42d57c912bf4afb3aa302f8"}
Nov 25 11:33:58 crc kubenswrapper[4854]: I1125 11:33:58.018855 4854 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 11:34:13 crc kubenswrapper[4854]: E1125 11:34:13.935100 4854 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 25 11:34:13 crc kubenswrapper[4854]: E1125 11:34:13.939705 4854 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jh54d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-mmf6c_openshift-marketplace(f2033d4a-e7a4-4578-a114-96b1b6f2c4e0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 11:34:13 crc kubenswrapper[4854]: E1125 11:34:13.941041 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-mmf6c" podUID="f2033d4a-e7a4-4578-a114-96b1b6f2c4e0"
Nov 25 11:34:14 crc kubenswrapper[4854]: E1125 11:34:14.317761 4854 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-mmf6c" podUID="f2033d4a-e7a4-4578-a114-96b1b6f2c4e0"
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.028564 4854 patch_prober.go:28] interesting pod/machine-config-daemon-9qdk4 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.029236 4854 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.029283 4854 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4"
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.030160 4854 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d4629ad5cbfca6a18ad0fad79f579ee7d73371be636ce1cd84f7058db1bc6dc8"} pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.030227 4854 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" podUID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerName="machine-config-daemon" containerID="cri-o://d4629ad5cbfca6a18ad0fad79f579ee7d73371be636ce1cd84f7058db1bc6dc8" gracePeriod=600
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.429883 4854 generic.go:334] "Generic (PLEG): container finished" podID="aa43fdf8-0726-4b6e-bbda-2ac604e9eee0" containerID="d4629ad5cbfca6a18ad0fad79f579ee7d73371be636ce1cd84f7058db1bc6dc8" exitCode=0
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.429951 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerDied","Data":"d4629ad5cbfca6a18ad0fad79f579ee7d73371be636ce1cd84f7058db1bc6dc8"}
Nov 25 11:34:25 crc kubenswrapper[4854]: I1125 11:34:25.429993 4854 scope.go:117] "RemoveContainer" containerID="7f7b0bfefe09066bd006d68161d8598a1327dba11d008e89a0ee63321e865909"
Nov 25 11:34:26 crc kubenswrapper[4854]: I1125 11:34:26.442750 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9qdk4" event={"ID":"aa43fdf8-0726-4b6e-bbda-2ac604e9eee0","Type":"ContainerStarted","Data":"e3e3ef17c79b50d27fcab885d0b7aaf5a3c2f732d9f96c83155cd96e05af9d1e"}
Nov 25 11:34:31 crc kubenswrapper[4854]: I1125 11:34:31.496701 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mmf6c" event={"ID":"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0","Type":"ContainerStarted","Data":"b085b49505968a4a4605ac5aee9d35a866d6000e912e7e8f11f2a57cc6cbdd47"}
Nov 25 11:34:50 crc kubenswrapper[4854]: I1125 11:34:50.721862 4854 generic.go:334] "Generic (PLEG): container finished" podID="f2033d4a-e7a4-4578-a114-96b1b6f2c4e0" containerID="b085b49505968a4a4605ac5aee9d35a866d6000e912e7e8f11f2a57cc6cbdd47" exitCode=0
Nov 25 11:34:50 crc kubenswrapper[4854]: I1125 11:34:50.721951 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mmf6c" event={"ID":"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0","Type":"ContainerDied","Data":"b085b49505968a4a4605ac5aee9d35a866d6000e912e7e8f11f2a57cc6cbdd47"}
Nov 25 11:34:51 crc kubenswrapper[4854]: I1125 11:34:51.735867 4854 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mmf6c" event={"ID":"f2033d4a-e7a4-4578-a114-96b1b6f2c4e0","Type":"ContainerStarted","Data":"831984ac53c2a37e42c913b573ef0d517bd0c43b99568e1614fc0f48c3e241d3"}
Nov 25 11:34:51 crc kubenswrapper[4854]: I1125 11:34:51.769910 4854 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mmf6c" podStartSLOduration=3.484655119 podStartE2EDuration="56.7698831s" podCreationTimestamp="2025-11-25 11:33:55 +0000 UTC" firstStartedPulling="2025-11-25 11:33:58.018404777 +0000 UTC m=+7043.871398153" lastFinishedPulling="2025-11-25 11:34:51.303632758 +0000 UTC m=+7097.156626134" observedRunningTime="2025-11-25 11:34:51.756770773 +0000 UTC m=+7097.609764179" watchObservedRunningTime="2025-11-25 11:34:51.7698831 +0000 UTC m=+7097.622876476"
Nov 25 11:34:56 crc kubenswrapper[4854]: I1125 11:34:56.042106 4854 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:34:56 crc kubenswrapper[4854]: I1125 11:34:56.043555 4854 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mmf6c"
Nov 25 11:34:57 crc kubenswrapper[4854]: I1125 11:34:57.119293 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mmf6c" podUID="f2033d4a-e7a4-4578-a114-96b1b6f2c4e0" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:34:57 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:34:57 crc kubenswrapper[4854]: >
Nov 25 11:35:07 crc kubenswrapper[4854]: I1125 11:35:07.101041 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mmf6c" podUID="f2033d4a-e7a4-4578-a114-96b1b6f2c4e0" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:35:07 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:35:07 crc kubenswrapper[4854]: >
Nov 25 11:35:18 crc kubenswrapper[4854]: I1125 11:35:18.491600 4854 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mmf6c" podUID="f2033d4a-e7a4-4578-a114-96b1b6f2c4e0" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:35:18 crc kubenswrapper[4854]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:35:18 crc kubenswrapper[4854]: >